// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
#include <xen/swiotlb-xen.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL		0
#define COHERENT	1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

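/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a streaming mapping walks through both ownership states via the generic
 * DMA API, which ends up in the helpers declared above:
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	(the device now owns the buffer and may DMA into it)
 *	dma_unmap_page(dev, dma, len, DMA_FROM_DEVICE);
 *	(the CPU owns the buffer again and may safely read it)
 */
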
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.map_resource		= dma_direct_map_resource,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

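/*
 * Illustrative note, not from the original source: drivers never call the
 * arm_dma_* methods above directly.  They use the generic wrappers, which
 * dispatch through the ops attached to the device, roughly:
 *
 *	const struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t dma = ops->map_page(dev, page, offset, size, dir, attrs);
 *
 * so a non-coherent device lands in arm_dma_map_page() while a coherent
 * one uses arm_coherent_dma_ops below.
 */
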
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs);

const struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.map_resource		= dma_direct_map_resource,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static int __dma_supported(struct device *dev, u64 mask)
{
	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn)
		return 0;
	return 1;
}

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

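/*
 * Example (for illustration only): the pool size can be overridden on the
 * kernel command line, e.g.
 *
 *	coherent_pool=4M
 *
 * which memparse() above turns into a byte count before the pool is
 * created by atomic_pool_init() below.
 */
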
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
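
/*
 * Illustrative note (not in the original file): once the gen_pool above is
 * populated, non-blocking non-coherent allocations are carved out of it
 * with roughly
 *
 *	unsigned long va = gen_pool_alloc(atomic_pool, size);
 *	phys_addr_t pa = gen_pool_virt_to_phys(atomic_pool, va);
 *
 * see __alloc_from_pool()/__free_from_pool() further down for the real users.
 */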

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = dma_common_contiguous_remap(page, size, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = dma_common_contiguous_remap(page, size, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			dma_common_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		dma_common_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

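/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a typical consumer of the machinery above goes through the generic API,
 * which lands in arm_dma_alloc()/arm_dma_free() below for a non-coherent
 * device:
 *
 *	void *cpu;
 *	dma_addr_t dma;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);
 */
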
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

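/*
 * Illustrative sketch (hypothetical driver code): arm_dma_mmap() above is
 * what a driver's mmap file operation reaches when it calls
 *
 *	dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);
 *
 * with the cpu_addr/dma_addr pair previously returned by dma_alloc_coherent().
 */
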
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 unsigned long attrs)
{
	unsigned long pfn = dma_to_pfn(dev, handle);
	struct page *page;
	int ret;

	/* If the PFN is not valid, we do not have a struct page */
	if (!pfn_valid(pfn))
		return -ENXIO;

	page = pfn_to_page(pfn);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

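/*
 * Illustrative sketch (hypothetical driver code): the per-entry DMA
 * addresses described above are typically consumed like this:
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgt->sgl, s, count, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *
 * where program_hw_descriptor() stands in for device-specific code.
 */
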
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;

	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int arm_dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask);
}

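/*
 * Example (for illustration): a driver for a device that can only drive the
 * low 24 address bits would announce that limit with
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 *
 * which reaches arm_dma_supported() above through the dma_supported() hook.
 */
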
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	/*
	 * When CONFIG_ARM_LPAE is set, physical address can extend above
	 * 32-bits, which then can't be addressed by devices that only support
	 * 32-bit DMA.
	 * Use the generic dma-direct / swiotlb ops code in that case, as that
	 * handles bounce buffering for us.
	 */
	if (IS_ENABLED(CONFIG_ARM_LPAE))
		return NULL;
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}

#ifdef CONFIG_ARM_DMA_USE_IOMMU

static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return prot;
	}
}

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
Herrmann continue; 11314d852ef8SAndreas Herrmann 11324d852ef8SAndreas Herrmann bitmap_set(mapping->bitmaps[i], start, count); 11334d852ef8SAndreas Herrmann break; 11344d852ef8SAndreas Herrmann } 11354d852ef8SAndreas Herrmann 11364d852ef8SAndreas Herrmann /* 11374d852ef8SAndreas Herrmann * No unused range found. Try to extend the existing mapping 11384d852ef8SAndreas Herrmann * and perform a second attempt to reserve an IO virtual 11394d852ef8SAndreas Herrmann * address range of size bytes. 11404d852ef8SAndreas Herrmann */ 11414d852ef8SAndreas Herrmann if (i == mapping->nr_bitmaps) { 11424d852ef8SAndreas Herrmann if (extend_iommu_mapping(mapping)) { 11434d852ef8SAndreas Herrmann spin_unlock_irqrestore(&mapping->lock, flags); 114472fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 11454d852ef8SAndreas Herrmann } 11464d852ef8SAndreas Herrmann 11474d852ef8SAndreas Herrmann start = bitmap_find_next_zero_area(mapping->bitmaps[i], 11484d852ef8SAndreas Herrmann mapping->bits, 0, count, align); 11494d852ef8SAndreas Herrmann 11504ce63fcdSMarek Szyprowski if (start > mapping->bits) { 11514ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 115272fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 11534ce63fcdSMarek Szyprowski } 11544ce63fcdSMarek Szyprowski 11554d852ef8SAndreas Herrmann bitmap_set(mapping->bitmaps[i], start, count); 11564d852ef8SAndreas Herrmann } 11574ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 11584ce63fcdSMarek Szyprowski 1159006f841dSRitesh Harjani iova = mapping->base + (mapping_size * i); 116068efd7d2SMarek Szyprowski iova += start << PAGE_SHIFT; 11614d852ef8SAndreas Herrmann 11624d852ef8SAndreas Herrmann return iova; 11634ce63fcdSMarek Szyprowski } 11644ce63fcdSMarek Szyprowski 11654ce63fcdSMarek Szyprowski static inline void __free_iova(struct dma_iommu_mapping *mapping, 11664ce63fcdSMarek Szyprowski dma_addr_t addr, size_t size) 11674ce63fcdSMarek Szyprowski { 11684d852ef8SAndreas Herrmann unsigned int start, count; 1169006f841dSRitesh Harjani size_t mapping_size = mapping->bits << PAGE_SHIFT; 11704ce63fcdSMarek Szyprowski unsigned long flags; 11714d852ef8SAndreas Herrmann dma_addr_t bitmap_base; 11724d852ef8SAndreas Herrmann u32 bitmap_index; 11734d852ef8SAndreas Herrmann 11744d852ef8SAndreas Herrmann if (!size) 11754d852ef8SAndreas Herrmann return; 11764d852ef8SAndreas Herrmann 1177006f841dSRitesh Harjani bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; 11784d852ef8SAndreas Herrmann BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); 11794d852ef8SAndreas Herrmann 1180006f841dSRitesh Harjani bitmap_base = mapping->base + mapping_size * bitmap_index; 11814d852ef8SAndreas Herrmann 118268efd7d2SMarek Szyprowski start = (addr - bitmap_base) >> PAGE_SHIFT; 11834d852ef8SAndreas Herrmann 1184006f841dSRitesh Harjani if (addr + size > bitmap_base + mapping_size) { 11854d852ef8SAndreas Herrmann /* 11864d852ef8SAndreas Herrmann * The address range to be freed reaches into the iova 11874d852ef8SAndreas Herrmann * range of the next bitmap. This should not happen as 11884d852ef8SAndreas Herrmann * we don't allow this in __alloc_iova (at the 11894d852ef8SAndreas Herrmann * moment). 
11904d852ef8SAndreas Herrmann */ 11914d852ef8SAndreas Herrmann BUG(); 11924d852ef8SAndreas Herrmann } else 119368efd7d2SMarek Szyprowski count = size >> PAGE_SHIFT; 11944ce63fcdSMarek Szyprowski 11954ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 11964d852ef8SAndreas Herrmann bitmap_clear(mapping->bitmaps[bitmap_index], start, count); 11974ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 11984ce63fcdSMarek Szyprowski } 11994ce63fcdSMarek Szyprowski 120033298ef6SDoug Anderson /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */ 120133298ef6SDoug Anderson static const int iommu_order_array[] = { 9, 8, 4, 0 }; 120233298ef6SDoug Anderson 1203549a17e4SMarek Szyprowski static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, 120400085f1eSKrzysztof Kozlowski gfp_t gfp, unsigned long attrs, 1205f1270896SGregory CLEMENT int coherent_flag) 12064ce63fcdSMarek Szyprowski { 12074ce63fcdSMarek Szyprowski struct page **pages; 12084ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 12094ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *); 12104ce63fcdSMarek Szyprowski int i = 0; 121133298ef6SDoug Anderson int order_idx = 0; 12124ce63fcdSMarek Szyprowski 12134ce63fcdSMarek Szyprowski if (array_size <= PAGE_SIZE) 121423be7fdaSAlexandre Courbot pages = kzalloc(array_size, GFP_KERNEL); 12154ce63fcdSMarek Szyprowski else 12164ce63fcdSMarek Szyprowski pages = vzalloc(array_size); 12174ce63fcdSMarek Szyprowski if (!pages) 12184ce63fcdSMarek Szyprowski return NULL; 12194ce63fcdSMarek Szyprowski 122000085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) 1221549a17e4SMarek Szyprowski { 1222549a17e4SMarek Szyprowski unsigned long order = get_order(size); 1223549a17e4SMarek Szyprowski struct page *page; 1224549a17e4SMarek Szyprowski 1225d834c5abSMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order, 1226d834c5abSMarek Szyprowski gfp & __GFP_NOWARN); 1227549a17e4SMarek Szyprowski if (!page) 1228549a17e4SMarek Szyprowski goto error; 1229549a17e4SMarek Szyprowski 1230f1270896SGregory CLEMENT __dma_clear_buffer(page, size, coherent_flag); 1231549a17e4SMarek Szyprowski 1232549a17e4SMarek Szyprowski for (i = 0; i < count; i++) 1233549a17e4SMarek Szyprowski pages[i] = page + i; 1234549a17e4SMarek Szyprowski 1235549a17e4SMarek Szyprowski return pages; 1236549a17e4SMarek Szyprowski } 1237549a17e4SMarek Szyprowski 123814d3ae2eSDoug Anderson /* Go straight to 4K chunks if caller says it's OK. 
*/ 123900085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) 124014d3ae2eSDoug Anderson order_idx = ARRAY_SIZE(iommu_order_array) - 1; 124114d3ae2eSDoug Anderson 1242f8669befSMarek Szyprowski /* 1243f8669befSMarek Szyprowski * IOMMU can map any pages, so highmem can also be used here 1244f8669befSMarek Szyprowski */ 1245f8669befSMarek Szyprowski gfp |= __GFP_NOWARN | __GFP_HIGHMEM; 1246f8669befSMarek Szyprowski 12474ce63fcdSMarek Szyprowski while (count) { 124849f28aa6STomasz Figa int j, order; 12494ce63fcdSMarek Szyprowski 125033298ef6SDoug Anderson order = iommu_order_array[order_idx]; 125133298ef6SDoug Anderson 125233298ef6SDoug Anderson /* Drop down when we get small */ 125333298ef6SDoug Anderson if (__fls(count) < order) { 125433298ef6SDoug Anderson order_idx++; 125533298ef6SDoug Anderson continue; 125649f28aa6STomasz Figa } 125749f28aa6STomasz Figa 125833298ef6SDoug Anderson if (order) { 125933298ef6SDoug Anderson /* See if it's easy to allocate a high-order chunk */ 126033298ef6SDoug Anderson pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); 126133298ef6SDoug Anderson 126233298ef6SDoug Anderson /* Go down a notch at first sign of pressure */ 126349f28aa6STomasz Figa if (!pages[i]) { 126433298ef6SDoug Anderson order_idx++; 126533298ef6SDoug Anderson continue; 126633298ef6SDoug Anderson } 126733298ef6SDoug Anderson } else { 126849f28aa6STomasz Figa pages[i] = alloc_pages(gfp, 0); 12694ce63fcdSMarek Szyprowski if (!pages[i]) 12704ce63fcdSMarek Szyprowski goto error; 127149f28aa6STomasz Figa } 12724ce63fcdSMarek Szyprowski 12735a796eebSHiroshi Doyu if (order) { 12744ce63fcdSMarek Szyprowski split_page(pages[i], order); 12754ce63fcdSMarek Szyprowski j = 1 << order; 12764ce63fcdSMarek Szyprowski while (--j) 12774ce63fcdSMarek Szyprowski pages[i + j] = pages[i] + j; 12785a796eebSHiroshi Doyu } 12794ce63fcdSMarek Szyprowski 1280f1270896SGregory CLEMENT __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag); 12814ce63fcdSMarek Szyprowski i += 1 << order; 12824ce63fcdSMarek Szyprowski count -= 1 << order; 12834ce63fcdSMarek Szyprowski } 12844ce63fcdSMarek Szyprowski 12854ce63fcdSMarek Szyprowski return pages; 12864ce63fcdSMarek Szyprowski error: 12879fa8af91SMarek Szyprowski while (i--) 12884ce63fcdSMarek Szyprowski if (pages[i]) 12894ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 12901d5cfdb0STetsuo Handa kvfree(pages); 12914ce63fcdSMarek Szyprowski return NULL; 12924ce63fcdSMarek Szyprowski } 12934ce63fcdSMarek Szyprowski 1294549a17e4SMarek Szyprowski static int __iommu_free_buffer(struct device *dev, struct page **pages, 129500085f1eSKrzysztof Kozlowski size_t size, unsigned long attrs) 12964ce63fcdSMarek Szyprowski { 12974ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 12984ce63fcdSMarek Szyprowski int i; 1299549a17e4SMarek Szyprowski 130000085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { 1301549a17e4SMarek Szyprowski dma_release_from_contiguous(dev, pages[0], count); 1302549a17e4SMarek Szyprowski } else { 13034ce63fcdSMarek Szyprowski for (i = 0; i < count; i++) 13044ce63fcdSMarek Szyprowski if (pages[i]) 13054ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 1306549a17e4SMarek Szyprowski } 1307549a17e4SMarek Szyprowski 13081d5cfdb0STetsuo Handa kvfree(pages); 13094ce63fcdSMarek Szyprowski return 0; 13104ce63fcdSMarek Szyprowski } 13114ce63fcdSMarek Szyprowski 13124ce63fcdSMarek Szyprowski /* 13134ce63fcdSMarek Szyprowski * Create a mapping in device IO address space for specified pages 13144ce63fcdSMarek Szyprowski */
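/*
 * Illustrative sketch of the coalescing done by __iommu_create_mapping()
 * below (the pfn values are made up for the example): the loop walks the
 * pages[] array produced by __iommu_alloc_buffer() and merges each run of
 * physically contiguous pages into a single iommu_map() call. With
 * count == 4 and pages[] referring to pfns { 100, 101, 102, 200 }, the
 * loop issues:
 *
 *	iommu_map(mapping->domain, iova, page_to_phys(pages[0]),
 *		  3 * PAGE_SIZE, prot);			pfns 100..102
 *	iommu_map(mapping->domain, iova + 3 * PAGE_SIZE,
 *		  page_to_phys(pages[3]), PAGE_SIZE, prot);	pfn 200
 *
 * so a buffer that is fragmented on the CPU side still appears contiguous
 * in the device's IO virtual address space.
 */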
13154ce63fcdSMarek Szyprowski static dma_addr_t 13167d2822dfSSricharan R __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, 13177d2822dfSSricharan R unsigned long attrs) 13184ce63fcdSMarek Szyprowski { 131989cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 13204ce63fcdSMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 13214ce63fcdSMarek Szyprowski dma_addr_t dma_addr, iova; 132290cde558SAndre Przywara int i; 13234ce63fcdSMarek Szyprowski 13244ce63fcdSMarek Szyprowski dma_addr = __alloc_iova(mapping, size); 132572fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 13264ce63fcdSMarek Szyprowski return dma_addr; 13274ce63fcdSMarek Szyprowski 13284ce63fcdSMarek Szyprowski iova = dma_addr; 13294ce63fcdSMarek Szyprowski for (i = 0; i < count; ) { 133090cde558SAndre Przywara int ret; 133190cde558SAndre Przywara 13324ce63fcdSMarek Szyprowski unsigned int next_pfn = page_to_pfn(pages[i]) + 1; 13334ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(pages[i]); 13344ce63fcdSMarek Szyprowski unsigned int len, j; 13354ce63fcdSMarek Szyprowski 13364ce63fcdSMarek Szyprowski for (j = i + 1; j < count; j++, next_pfn++) 13374ce63fcdSMarek Szyprowski if (page_to_pfn(pages[j]) != next_pfn) 13384ce63fcdSMarek Szyprowski break; 13394ce63fcdSMarek Szyprowski 13404ce63fcdSMarek Szyprowski len = (j - i) << PAGE_SHIFT; 1341c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, 13427d2822dfSSricharan R __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs)); 13434ce63fcdSMarek Szyprowski if (ret < 0) 13444ce63fcdSMarek Szyprowski goto fail; 13454ce63fcdSMarek Szyprowski iova += len; 13464ce63fcdSMarek Szyprowski i = j; 13474ce63fcdSMarek Szyprowski } 13484ce63fcdSMarek Szyprowski return dma_addr; 13494ce63fcdSMarek Szyprowski fail: 13504ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); 13514ce63fcdSMarek Szyprowski __free_iova(mapping, dma_addr, size); 135272fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 13534ce63fcdSMarek Szyprowski } 13544ce63fcdSMarek Szyprowski 13554ce63fcdSMarek Szyprowski static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) 13564ce63fcdSMarek Szyprowski { 135789cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 13584ce63fcdSMarek Szyprowski 13594ce63fcdSMarek Szyprowski /* 13604ce63fcdSMarek Szyprowski * add optional in-page offset from iova to size and align 13614ce63fcdSMarek Szyprowski * result to page size 13624ce63fcdSMarek Szyprowski */ 13634ce63fcdSMarek Szyprowski size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); 13644ce63fcdSMarek Szyprowski iova &= PAGE_MASK; 13654ce63fcdSMarek Szyprowski 13664ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, size); 13674ce63fcdSMarek Szyprowski __free_iova(mapping, iova, size); 13684ce63fcdSMarek Szyprowski return 0; 13694ce63fcdSMarek Szyprowski } 13704ce63fcdSMarek Szyprowski 1371665bad7bSHiroshi Doyu static struct page **__atomic_get_pages(void *addr) 1372665bad7bSHiroshi Doyu { 137336d0fd21SLaura Abbott struct page *page; 137436d0fd21SLaura Abbott phys_addr_t phys; 1375665bad7bSHiroshi Doyu 137636d0fd21SLaura Abbott phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr); 137736d0fd21SLaura Abbott page = phys_to_page(phys); 137836d0fd21SLaura Abbott 137936d0fd21SLaura Abbott return (struct page **)page; 1380665bad7bSHiroshi Doyu } 1381665bad7bSHiroshi Doyu 138200085f1eSKrzysztof Kozlowski static struct page **__iommu_get_pages(void 
*cpu_addr, unsigned long attrs) 1383e9da6e99SMarek Szyprowski { 1384665bad7bSHiroshi Doyu if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) 1385665bad7bSHiroshi Doyu return __atomic_get_pages(cpu_addr); 1386665bad7bSHiroshi Doyu 138700085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) 1388955c757eSMarek Szyprowski return cpu_addr; 1389955c757eSMarek Szyprowski 13905cf45379SChristoph Hellwig return dma_common_find_pages(cpu_addr); 1391e9da6e99SMarek Szyprowski } 1392e9da6e99SMarek Szyprowski 139356506822SGregory CLEMENT static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, 13947d2822dfSSricharan R dma_addr_t *handle, int coherent_flag, 13957d2822dfSSricharan R unsigned long attrs) 1396479ed93aSHiroshi Doyu { 1397479ed93aSHiroshi Doyu struct page *page; 1398479ed93aSHiroshi Doyu void *addr; 1399479ed93aSHiroshi Doyu 140056506822SGregory CLEMENT if (coherent_flag == COHERENT) 140156506822SGregory CLEMENT addr = __alloc_simple_buffer(dev, size, gfp, &page); 140256506822SGregory CLEMENT else 1403479ed93aSHiroshi Doyu addr = __alloc_from_pool(size, &page); 1404479ed93aSHiroshi Doyu if (!addr) 1405479ed93aSHiroshi Doyu return NULL; 1406479ed93aSHiroshi Doyu 14077d2822dfSSricharan R *handle = __iommu_create_mapping(dev, &page, size, attrs); 140872fd97bfSChristoph Hellwig if (*handle == DMA_MAPPING_ERROR) 1409479ed93aSHiroshi Doyu goto err_mapping; 1410479ed93aSHiroshi Doyu 1411479ed93aSHiroshi Doyu return addr; 1412479ed93aSHiroshi Doyu 1413479ed93aSHiroshi Doyu err_mapping: 1414479ed93aSHiroshi Doyu __free_from_pool(addr, size); 1415479ed93aSHiroshi Doyu return NULL; 1416479ed93aSHiroshi Doyu } 1417479ed93aSHiroshi Doyu 1418d5898291SMarek Szyprowski static void __iommu_free_atomic(struct device *dev, void *cpu_addr, 141956506822SGregory CLEMENT dma_addr_t handle, size_t size, int coherent_flag) 1420479ed93aSHiroshi Doyu { 1421479ed93aSHiroshi Doyu __iommu_remove_mapping(dev, handle, size); 142256506822SGregory CLEMENT if (coherent_flag == COHERENT) 142356506822SGregory CLEMENT __dma_free_buffer(virt_to_page(cpu_addr), size); 142456506822SGregory CLEMENT else 1425d5898291SMarek Szyprowski __free_from_pool(cpu_addr, size); 1426479ed93aSHiroshi Doyu } 1427479ed93aSHiroshi Doyu 142856506822SGregory CLEMENT static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, 142900085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs, 143056506822SGregory CLEMENT int coherent_flag) 14314ce63fcdSMarek Szyprowski { 143271b55663SRussell King pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 14334ce63fcdSMarek Szyprowski struct page **pages; 14344ce63fcdSMarek Szyprowski void *addr = NULL; 14354ce63fcdSMarek Szyprowski 143672fd97bfSChristoph Hellwig *handle = DMA_MAPPING_ERROR; 14374ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 14384ce63fcdSMarek Szyprowski 143956506822SGregory CLEMENT if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp)) 144056506822SGregory CLEMENT return __iommu_alloc_simple(dev, size, gfp, handle, 14417d2822dfSSricharan R coherent_flag, attrs); 1442479ed93aSHiroshi Doyu 14435b91a98cSRichard Zhao /* 14445b91a98cSRichard Zhao * Following is a work-around (a.k.a. hack) to prevent pages 14455b91a98cSRichard Zhao * with __GFP_COMP being passed to split_page() which cannot 14465b91a98cSRichard Zhao * handle them. The real problem is that this flag probably 14475b91a98cSRichard Zhao * should be 0 on ARM as it is not supported on this 14485b91a98cSRichard Zhao * platform; see CONFIG_HUGETLBFS. 
14495b91a98cSRichard Zhao */ 14505b91a98cSRichard Zhao gfp &= ~(__GFP_COMP); 14515b91a98cSRichard Zhao 145256506822SGregory CLEMENT pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag); 14534ce63fcdSMarek Szyprowski if (!pages) 14544ce63fcdSMarek Szyprowski return NULL; 14554ce63fcdSMarek Szyprowski 14567d2822dfSSricharan R *handle = __iommu_create_mapping(dev, pages, size, attrs); 145772fd97bfSChristoph Hellwig if (*handle == DMA_MAPPING_ERROR) 14584ce63fcdSMarek Szyprowski goto err_buffer; 14594ce63fcdSMarek Szyprowski 146000085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) 1461955c757eSMarek Szyprowski return pages; 1462955c757eSMarek Szyprowski 146378406ff5SChristoph Hellwig addr = dma_common_pages_remap(pages, size, prot, 1464e9da6e99SMarek Szyprowski __builtin_return_address(0)); 14654ce63fcdSMarek Szyprowski if (!addr) 14664ce63fcdSMarek Szyprowski goto err_mapping; 14674ce63fcdSMarek Szyprowski 14684ce63fcdSMarek Szyprowski return addr; 14694ce63fcdSMarek Szyprowski 14704ce63fcdSMarek Szyprowski err_mapping: 14714ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, *handle, size); 14724ce63fcdSMarek Szyprowski err_buffer: 1473549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 14744ce63fcdSMarek Szyprowski return NULL; 14754ce63fcdSMarek Szyprowski } 14764ce63fcdSMarek Szyprowski 147756506822SGregory CLEMENT static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, 147800085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 147956506822SGregory CLEMENT { 148056506822SGregory CLEMENT return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); 148156506822SGregory CLEMENT } 148256506822SGregory CLEMENT 148356506822SGregory CLEMENT static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, 148400085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 148556506822SGregory CLEMENT { 148656506822SGregory CLEMENT return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); 148756506822SGregory CLEMENT } 148856506822SGregory CLEMENT 148956506822SGregory CLEMENT static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 14904ce63fcdSMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 149100085f1eSKrzysztof Kozlowski unsigned long attrs) 14924ce63fcdSMarek Szyprowski { 1493955c757eSMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1494371f0f08SMarek Szyprowski unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 14956248461dSSouptick Joarder int err; 1496e9da6e99SMarek Szyprowski 1497e9da6e99SMarek Szyprowski if (!pages) 1498e9da6e99SMarek Szyprowski return -ENXIO; 14994ce63fcdSMarek Szyprowski 15006248461dSSouptick Joarder if (vma->vm_pgoff >= nr_pages) 1501371f0f08SMarek Szyprowski return -ENXIO; 1502371f0f08SMarek Szyprowski 15036248461dSSouptick Joarder err = vm_map_pages(vma, pages, nr_pages); 15046248461dSSouptick Joarder if (err) 15056248461dSSouptick Joarder pr_err("Remapping memory failed: %d\n", err); 15067e312103SMarek Szyprowski 15076248461dSSouptick Joarder return err; 15084ce63fcdSMarek Szyprowski } 150956506822SGregory CLEMENT static int arm_iommu_mmap_attrs(struct device *dev, 151056506822SGregory CLEMENT struct vm_area_struct *vma, void *cpu_addr, 151100085f1eSKrzysztof Kozlowski dma_addr_t dma_addr, size_t size, unsigned long attrs) 151256506822SGregory CLEMENT { 151356506822SGregory CLEMENT vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 
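	/*
	 * The assignment above rewrites the vma protections for
	 * non-cacheable access (writecombine or dmacoherent, depending on
	 * attrs) before the pages are inserted; the coherent variant below
	 * skips that step. A minimal sketch of how a driver typically
	 * reaches this path from its .mmap file operation (the "foo" names
	 * are hypothetical):
	 *
	 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	 *	{
	 *		struct foo_dev *foo = file->private_data;
	 *
	 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
	 *					 foo->dma_handle,
	 *					 vma->vm_end - vma->vm_start);
	 *	}
	 */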
151456506822SGregory CLEMENT 151556506822SGregory CLEMENT return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); 151656506822SGregory CLEMENT } 151756506822SGregory CLEMENT 151856506822SGregory CLEMENT static int arm_coherent_iommu_mmap_attrs(struct device *dev, 151956506822SGregory CLEMENT struct vm_area_struct *vma, void *cpu_addr, 152000085f1eSKrzysztof Kozlowski dma_addr_t dma_addr, size_t size, unsigned long attrs) 152156506822SGregory CLEMENT { 152256506822SGregory CLEMENT return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); 152356506822SGregory CLEMENT } 15244ce63fcdSMarek Szyprowski 15254ce63fcdSMarek Szyprowski /* 15264ce63fcdSMarek Szyprowski * free a page as defined by the above mapping. 15274ce63fcdSMarek Szyprowski * Must not be called with IRQs disabled. 15284ce63fcdSMarek Szyprowski */ 152917fe8684SBen Dooks static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 153000085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs, int coherent_flag) 15314ce63fcdSMarek Szyprowski { 1532836bfa0dSYoungJun Cho struct page **pages; 15334ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 15344ce63fcdSMarek Szyprowski 153556506822SGregory CLEMENT if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) { 153656506822SGregory CLEMENT __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag); 1537479ed93aSHiroshi Doyu return; 1538479ed93aSHiroshi Doyu } 1539479ed93aSHiroshi Doyu 1540836bfa0dSYoungJun Cho pages = __iommu_get_pages(cpu_addr, attrs); 1541836bfa0dSYoungJun Cho if (!pages) { 1542836bfa0dSYoungJun Cho WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 1543836bfa0dSYoungJun Cho return; 1544836bfa0dSYoungJun Cho } 1545836bfa0dSYoungJun Cho 1546fe9041c2SChristoph Hellwig if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) 154751231740SChristoph Hellwig dma_common_free_remap(cpu_addr, size); 1548e9da6e99SMarek Szyprowski 15494ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, handle, size); 1550549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 15514ce63fcdSMarek Szyprowski } 15524ce63fcdSMarek Szyprowski 155317fe8684SBen Dooks static void arm_iommu_free_attrs(struct device *dev, size_t size, 155417fe8684SBen Dooks void *cpu_addr, dma_addr_t handle, 155517fe8684SBen Dooks unsigned long attrs) 155656506822SGregory CLEMENT { 155756506822SGregory CLEMENT __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); 155856506822SGregory CLEMENT } 155956506822SGregory CLEMENT 156017fe8684SBen Dooks static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, 156100085f1eSKrzysztof Kozlowski void *cpu_addr, dma_addr_t handle, unsigned long attrs) 156256506822SGregory CLEMENT { 156356506822SGregory CLEMENT __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); 156456506822SGregory CLEMENT } 156556506822SGregory CLEMENT 1566dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, 1567dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, 156800085f1eSKrzysztof Kozlowski size_t size, unsigned long attrs) 1569dc2832e1SMarek Szyprowski { 1570dc2832e1SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1571dc2832e1SMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1572dc2832e1SMarek Szyprowski 1573dc2832e1SMarek Szyprowski if (!pages) 1574dc2832e1SMarek Szyprowski return -ENXIO; 1575dc2832e1SMarek Szyprowski 1576dc2832e1SMarek Szyprowski return 
sg_alloc_table_from_pages(sgt, pages, count, 0, size, 1577dc2832e1SMarek Szyprowski GFP_KERNEL); 15784ce63fcdSMarek Szyprowski } 15794ce63fcdSMarek Szyprowski 15804ce63fcdSMarek Szyprowski /* 15814ce63fcdSMarek Szyprowski * Map a part of the scatter-gather list into contiguous io address space 15824ce63fcdSMarek Szyprowski */ 15834ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 15844ce63fcdSMarek Szyprowski size_t size, dma_addr_t *handle, 158500085f1eSKrzysztof Kozlowski enum dma_data_direction dir, unsigned long attrs, 15860fa478dfSRob Herring bool is_coherent) 15874ce63fcdSMarek Szyprowski { 158889cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 15894ce63fcdSMarek Szyprowski dma_addr_t iova, iova_base; 15904ce63fcdSMarek Szyprowski int ret = 0; 15914ce63fcdSMarek Szyprowski unsigned int count; 15924ce63fcdSMarek Szyprowski struct scatterlist *s; 1593c9b24996SAndreas Herrmann int prot; 15944ce63fcdSMarek Szyprowski 15954ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 159672fd97bfSChristoph Hellwig *handle = DMA_MAPPING_ERROR; 15974ce63fcdSMarek Szyprowski 15984ce63fcdSMarek Szyprowski iova_base = iova = __alloc_iova(mapping, size); 159972fd97bfSChristoph Hellwig if (iova == DMA_MAPPING_ERROR) 16004ce63fcdSMarek Szyprowski return -ENOMEM; 16014ce63fcdSMarek Szyprowski 16024ce63fcdSMarek Szyprowski for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 16033e6110fdSDan Williams phys_addr_t phys = page_to_phys(sg_page(s)); 16044ce63fcdSMarek Szyprowski unsigned int len = PAGE_ALIGN(s->offset + s->length); 16054ce63fcdSMarek Szyprowski 160600085f1eSKrzysztof Kozlowski if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 16074ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 16084ce63fcdSMarek Szyprowski 16097d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs); 1610c9b24996SAndreas Herrmann 1611c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, prot); 16124ce63fcdSMarek Szyprowski if (ret < 0) 16134ce63fcdSMarek Szyprowski goto fail; 16144ce63fcdSMarek Szyprowski count += len >> PAGE_SHIFT; 16154ce63fcdSMarek Szyprowski iova += len; 16164ce63fcdSMarek Szyprowski } 16174ce63fcdSMarek Szyprowski *handle = iova_base; 16184ce63fcdSMarek Szyprowski 16194ce63fcdSMarek Szyprowski return 0; 16204ce63fcdSMarek Szyprowski fail: 16214ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); 16224ce63fcdSMarek Szyprowski __free_iova(mapping, iova_base, size); 16234ce63fcdSMarek Szyprowski return ret; 16244ce63fcdSMarek Szyprowski } 16254ce63fcdSMarek Szyprowski 16260fa478dfSRob Herring static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, 162700085f1eSKrzysztof Kozlowski enum dma_data_direction dir, unsigned long attrs, 16280fa478dfSRob Herring bool is_coherent) 16294ce63fcdSMarek Szyprowski { 16304ce63fcdSMarek Szyprowski struct scatterlist *s = sg, *dma = sg, *start = sg; 16314ce63fcdSMarek Szyprowski int i, count = 0; 16324ce63fcdSMarek Szyprowski unsigned int offset = s->offset; 16334ce63fcdSMarek Szyprowski unsigned int size = s->offset + s->length; 16344ce63fcdSMarek Szyprowski unsigned int max = dma_get_max_seg_size(dev); 16354ce63fcdSMarek Szyprowski 16364ce63fcdSMarek Szyprowski for (i = 1; i < nents; i++) { 16374ce63fcdSMarek Szyprowski s = sg_next(s); 16384ce63fcdSMarek Szyprowski 163972fd97bfSChristoph Hellwig s->dma_address = DMA_MAPPING_ERROR; 16404ce63fcdSMarek 
Szyprowski s->dma_length = 0; 16414ce63fcdSMarek Szyprowski 16424ce63fcdSMarek Szyprowski if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 16434ce63fcdSMarek Szyprowski if (__map_sg_chunk(dev, start, size, &dma->dma_address, 16440fa478dfSRob Herring dir, attrs, is_coherent) < 0) 16454ce63fcdSMarek Szyprowski goto bad_mapping; 16464ce63fcdSMarek Szyprowski 16474ce63fcdSMarek Szyprowski dma->dma_address += offset; 16484ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 16494ce63fcdSMarek Szyprowski 16504ce63fcdSMarek Szyprowski size = offset = s->offset; 16514ce63fcdSMarek Szyprowski start = s; 16524ce63fcdSMarek Szyprowski dma = sg_next(dma); 16534ce63fcdSMarek Szyprowski count += 1; 16544ce63fcdSMarek Szyprowski } 16554ce63fcdSMarek Szyprowski size += s->length; 16564ce63fcdSMarek Szyprowski } 16570fa478dfSRob Herring if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, 16580fa478dfSRob Herring is_coherent) < 0) 16594ce63fcdSMarek Szyprowski goto bad_mapping; 16604ce63fcdSMarek Szyprowski 16614ce63fcdSMarek Szyprowski dma->dma_address += offset; 16624ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 16634ce63fcdSMarek Szyprowski 16644ce63fcdSMarek Szyprowski return count+1; 16654ce63fcdSMarek Szyprowski 16664ce63fcdSMarek Szyprowski bad_mapping: 16674ce63fcdSMarek Szyprowski for_each_sg(sg, s, count, i) 16684ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 16694ce63fcdSMarek Szyprowski return 0; 16704ce63fcdSMarek Szyprowski } 16714ce63fcdSMarek Szyprowski 16724ce63fcdSMarek Szyprowski /** 16730fa478dfSRob Herring * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA 16740fa478dfSRob Herring * @dev: valid struct device pointer 16750fa478dfSRob Herring * @sg: list of buffers 16760fa478dfSRob Herring * @nents: number of buffers to map 16770fa478dfSRob Herring * @dir: DMA transfer direction 16780fa478dfSRob Herring * 16790fa478dfSRob Herring * Map a set of i/o coherent buffers described by scatterlist in streaming 16800fa478dfSRob Herring * mode for DMA. The scatter gather list elements are merged together (if 16810fa478dfSRob Herring * possible) and tagged with the appropriate dma address and length. They are 16820fa478dfSRob Herring * obtained via sg_dma_{address,length}. 16830fa478dfSRob Herring */ 168417fe8684SBen Dooks static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, 168500085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, unsigned long attrs) 16860fa478dfSRob Herring { 16870fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, true); 16880fa478dfSRob Herring } 16890fa478dfSRob Herring 16900fa478dfSRob Herring /** 16910fa478dfSRob Herring * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA 16920fa478dfSRob Herring * @dev: valid struct device pointer 16930fa478dfSRob Herring * @sg: list of buffers 16940fa478dfSRob Herring * @nents: number of buffers to map 16950fa478dfSRob Herring * @dir: DMA transfer direction 16960fa478dfSRob Herring * 16970fa478dfSRob Herring * Map a set of buffers described by scatterlist in streaming mode for DMA. 16980fa478dfSRob Herring * The scatter gather list elements are merged together (if possible) and 16990fa478dfSRob Herring * tagged with the appropriate dma address and length. They are obtained via 17000fa478dfSRob Herring * sg_dma_{address,length}. 
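 *
 * A minimal caller sketch (assumes an already-populated struct sg_table
 * "sgt", e.g. from sg_alloc_table_from_pages(); drivers normally go
 * through the generic DMA API rather than calling this op directly):
 *
 *	int nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -ENOMEM;
 *	... program the device using sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);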
17010fa478dfSRob Herring */ 170217fe8684SBen Dooks static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, 170300085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, unsigned long attrs) 17040fa478dfSRob Herring { 17050fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, false); 17060fa478dfSRob Herring } 17070fa478dfSRob Herring 17080fa478dfSRob Herring static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 170900085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, 171000085f1eSKrzysztof Kozlowski unsigned long attrs, bool is_coherent) 17110fa478dfSRob Herring { 17120fa478dfSRob Herring struct scatterlist *s; 17130fa478dfSRob Herring int i; 17140fa478dfSRob Herring 17150fa478dfSRob Herring for_each_sg(sg, s, nents, i) { 17160fa478dfSRob Herring if (sg_dma_len(s)) 17170fa478dfSRob Herring __iommu_remove_mapping(dev, sg_dma_address(s), 17180fa478dfSRob Herring sg_dma_len(s)); 171900085f1eSKrzysztof Kozlowski if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 17200fa478dfSRob Herring __dma_page_dev_to_cpu(sg_page(s), s->offset, 17210fa478dfSRob Herring s->length, dir); 17220fa478dfSRob Herring } 17230fa478dfSRob Herring } 17240fa478dfSRob Herring 17250fa478dfSRob Herring /** 17260fa478dfSRob Herring * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 17270fa478dfSRob Herring * @dev: valid struct device pointer 17280fa478dfSRob Herring * @sg: list of buffers 17290fa478dfSRob Herring * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 17300fa478dfSRob Herring * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17310fa478dfSRob Herring * 17320fa478dfSRob Herring * Unmap a set of streaming mode DMA translations. Again, CPU access 17330fa478dfSRob Herring * rules concerning calls here are the same as for dma_unmap_single(). 17340fa478dfSRob Herring */ 173517fe8684SBen Dooks static void arm_coherent_iommu_unmap_sg(struct device *dev, 173617fe8684SBen Dooks struct scatterlist *sg, int nents, enum dma_data_direction dir, 173700085f1eSKrzysztof Kozlowski unsigned long attrs) 17380fa478dfSRob Herring { 17390fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); 17400fa478dfSRob Herring } 17410fa478dfSRob Herring 17420fa478dfSRob Herring /** 17434ce63fcdSMarek Szyprowski * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 17444ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 17454ce63fcdSMarek Szyprowski * @sg: list of buffers 17464ce63fcdSMarek Szyprowski * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 17474ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17484ce63fcdSMarek Szyprowski * 17494ce63fcdSMarek Szyprowski * Unmap a set of streaming mode DMA translations. Again, CPU access 17504ce63fcdSMarek Szyprowski * rules concerning calls here are the same as for dma_unmap_single(). 
17514ce63fcdSMarek Szyprowski */ 175217fe8684SBen Dooks static void arm_iommu_unmap_sg(struct device *dev, 175317fe8684SBen Dooks struct scatterlist *sg, int nents, 175400085f1eSKrzysztof Kozlowski enum dma_data_direction dir, 175500085f1eSKrzysztof Kozlowski unsigned long attrs) 17564ce63fcdSMarek Szyprowski { 17570fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); 17584ce63fcdSMarek Szyprowski } 17594ce63fcdSMarek Szyprowski 17604ce63fcdSMarek Szyprowski /** 17614ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_cpu 17624ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 17634ce63fcdSMarek Szyprowski * @sg: list of buffers 17644ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 17654ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17664ce63fcdSMarek Szyprowski */ 176717fe8684SBen Dooks static void arm_iommu_sync_sg_for_cpu(struct device *dev, 176817fe8684SBen Dooks struct scatterlist *sg, 17694ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 17704ce63fcdSMarek Szyprowski { 17714ce63fcdSMarek Szyprowski struct scatterlist *s; 17724ce63fcdSMarek Szyprowski int i; 17734ce63fcdSMarek Szyprowski 17744ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 17754ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); 17764ce63fcdSMarek Szyprowski 17774ce63fcdSMarek Szyprowski } 17784ce63fcdSMarek Szyprowski 17794ce63fcdSMarek Szyprowski /** 17804ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_device 17814ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 17824ce63fcdSMarek Szyprowski * @sg: list of buffers 17834ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 17844ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17854ce63fcdSMarek Szyprowski */ 178617fe8684SBen Dooks static void arm_iommu_sync_sg_for_device(struct device *dev, 178717fe8684SBen Dooks struct scatterlist *sg, 17884ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 17894ce63fcdSMarek Szyprowski { 17904ce63fcdSMarek Szyprowski struct scatterlist *s; 17914ce63fcdSMarek Szyprowski int i; 17924ce63fcdSMarek Szyprowski 17934ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 17944ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 17954ce63fcdSMarek Szyprowski } 17964ce63fcdSMarek Szyprowski 17974ce63fcdSMarek Szyprowski 17984ce63fcdSMarek Szyprowski /** 17990fa478dfSRob Herring * arm_coherent_iommu_map_page 18000fa478dfSRob Herring * @dev: valid struct device pointer 18010fa478dfSRob Herring * @page: page that buffer resides in 18020fa478dfSRob Herring * @offset: offset into page for start of buffer 18030fa478dfSRob Herring * @size: size of buffer to map 18040fa478dfSRob Herring * @dir: DMA transfer direction 18050fa478dfSRob Herring * 18060fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_map_page() 18070fa478dfSRob Herring */ 18080fa478dfSRob Herring static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, 18090fa478dfSRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 181000085f1eSKrzysztof Kozlowski unsigned long attrs) 18110fa478dfSRob Herring { 181289cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 18130fa478dfSRob Herring dma_addr_t dma_addr; 181413987d68SWill Deacon int ret, prot, len = PAGE_ALIGN(size + offset); 18150fa478dfSRob Herring 
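	/*
	 * The IOVA allocation below covers the whole page-aligned span and
	 * the sub-page offset is added back to the returned handle. Worked
	 * example (numbers chosen purely for illustration, 4 KiB pages):
	 * offset = 0x300 and size = 0x2100 give
	 * len = PAGE_ALIGN(0x2400) = 0x3000 (three pages), and the handle
	 * handed back to the caller is dma_addr + 0x300.
	 */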
18160fa478dfSRob Herring dma_addr = __alloc_iova(mapping, len); 181772fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 18180fa478dfSRob Herring return dma_addr; 18190fa478dfSRob Herring 18207d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs); 182113987d68SWill Deacon 182213987d68SWill Deacon ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); 18230fa478dfSRob Herring if (ret < 0) 18240fa478dfSRob Herring goto fail; 18250fa478dfSRob Herring 18260fa478dfSRob Herring return dma_addr + offset; 18270fa478dfSRob Herring fail: 18280fa478dfSRob Herring __free_iova(mapping, dma_addr, len); 182972fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 18300fa478dfSRob Herring } 18310fa478dfSRob Herring 18320fa478dfSRob Herring /** 18334ce63fcdSMarek Szyprowski * arm_iommu_map_page 18344ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18354ce63fcdSMarek Szyprowski * @page: page that buffer resides in 18364ce63fcdSMarek Szyprowski * @offset: offset into page for start of buffer 18374ce63fcdSMarek Szyprowski * @size: size of buffer to map 18384ce63fcdSMarek Szyprowski * @dir: DMA transfer direction 18394ce63fcdSMarek Szyprowski * 18404ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_map_page() 18414ce63fcdSMarek Szyprowski */ 18424ce63fcdSMarek Szyprowski static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 18434ce63fcdSMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 184400085f1eSKrzysztof Kozlowski unsigned long attrs) 18454ce63fcdSMarek Szyprowski { 184600085f1eSKrzysztof Kozlowski if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 18474ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 18484ce63fcdSMarek Szyprowski 18490fa478dfSRob Herring return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); 18500fa478dfSRob Herring } 18514ce63fcdSMarek Szyprowski 18520fa478dfSRob Herring /** 18530fa478dfSRob Herring * arm_coherent_iommu_unmap_page 18540fa478dfSRob Herring * @dev: valid struct device pointer 18550fa478dfSRob Herring * @handle: DMA address of buffer 18560fa478dfSRob Herring * @size: size of buffer (same as passed to dma_map_page) 18570fa478dfSRob Herring * @dir: DMA transfer direction (same as passed to dma_map_page) 18580fa478dfSRob Herring * 18590fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_unmap_page() 18600fa478dfSRob Herring */ 18610fa478dfSRob Herring static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, 186200085f1eSKrzysztof Kozlowski size_t size, enum dma_data_direction dir, unsigned long attrs) 18630fa478dfSRob Herring { 186489cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 18650fa478dfSRob Herring dma_addr_t iova = handle & PAGE_MASK; 18660fa478dfSRob Herring int offset = handle & ~PAGE_MASK; 18670fa478dfSRob Herring int len = PAGE_ALIGN(size + offset); 18684ce63fcdSMarek Szyprowski 18690fa478dfSRob Herring if (!iova) 18700fa478dfSRob Herring return; 18710fa478dfSRob Herring 18720fa478dfSRob Herring iommu_unmap(mapping->domain, iova, len); 18730fa478dfSRob Herring __free_iova(mapping, iova, len); 18744ce63fcdSMarek Szyprowski } 18754ce63fcdSMarek Szyprowski 18764ce63fcdSMarek Szyprowski /** 18774ce63fcdSMarek Szyprowski * arm_iommu_unmap_page 18784ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18794ce63fcdSMarek Szyprowski * @handle: DMA address of buffer 18804ce63fcdSMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 
18814ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 18824ce63fcdSMarek Szyprowski * 18834ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_unmap_page() 18844ce63fcdSMarek Szyprowski */ 18854ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 188600085f1eSKrzysztof Kozlowski size_t size, enum dma_data_direction dir, unsigned long attrs) 18874ce63fcdSMarek Szyprowski { 188889cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 18894ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 18904ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 18914ce63fcdSMarek Szyprowski int offset = handle & ~PAGE_MASK; 18924ce63fcdSMarek Szyprowski int len = PAGE_ALIGN(size + offset); 18934ce63fcdSMarek Szyprowski 18944ce63fcdSMarek Szyprowski if (!iova) 18954ce63fcdSMarek Szyprowski return; 18964ce63fcdSMarek Szyprowski 189700085f1eSKrzysztof Kozlowski if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 18984ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 18994ce63fcdSMarek Szyprowski 19004ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, len); 19014ce63fcdSMarek Szyprowski __free_iova(mapping, iova, len); 19024ce63fcdSMarek Szyprowski } 19034ce63fcdSMarek Szyprowski 190424ed5d2cSNiklas Söderlund /** 190524ed5d2cSNiklas Söderlund * arm_iommu_map_resource - map a device resource for DMA 190624ed5d2cSNiklas Söderlund * @dev: valid struct device pointer 190724ed5d2cSNiklas Söderlund * @phys_addr: physical address of resource 190824ed5d2cSNiklas Söderlund * @size: size of resource to map 190924ed5d2cSNiklas Söderlund * @dir: DMA transfer direction 191024ed5d2cSNiklas Söderlund */ 191124ed5d2cSNiklas Söderlund static dma_addr_t arm_iommu_map_resource(struct device *dev, 191224ed5d2cSNiklas Söderlund phys_addr_t phys_addr, size_t size, 191324ed5d2cSNiklas Söderlund enum dma_data_direction dir, unsigned long attrs) 191424ed5d2cSNiklas Söderlund { 191524ed5d2cSNiklas Söderlund struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 191624ed5d2cSNiklas Söderlund dma_addr_t dma_addr; 191724ed5d2cSNiklas Söderlund int ret, prot; 191824ed5d2cSNiklas Söderlund phys_addr_t addr = phys_addr & PAGE_MASK; 191924ed5d2cSNiklas Söderlund unsigned int offset = phys_addr & ~PAGE_MASK; 192024ed5d2cSNiklas Söderlund size_t len = PAGE_ALIGN(size + offset); 192124ed5d2cSNiklas Söderlund 192224ed5d2cSNiklas Söderlund dma_addr = __alloc_iova(mapping, len); 192372fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 192424ed5d2cSNiklas Söderlund return dma_addr; 192524ed5d2cSNiklas Söderlund 19267d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO; 192724ed5d2cSNiklas Söderlund 192824ed5d2cSNiklas Söderlund ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); 192924ed5d2cSNiklas Söderlund if (ret < 0) 193024ed5d2cSNiklas Söderlund goto fail; 193124ed5d2cSNiklas Söderlund 193224ed5d2cSNiklas Söderlund return dma_addr + offset; 193324ed5d2cSNiklas Söderlund fail: 193424ed5d2cSNiklas Söderlund __free_iova(mapping, dma_addr, len); 193572fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 193624ed5d2cSNiklas Söderlund } 193724ed5d2cSNiklas Söderlund 193824ed5d2cSNiklas Söderlund /** 193924ed5d2cSNiklas Söderlund * arm_iommu_unmap_resource - unmap a device DMA resource 194024ed5d2cSNiklas Söderlund * @dev: valid struct device pointer 194124ed5d2cSNiklas Söderlund * @dma_handle: DMA address to 
resource 194224ed5d2cSNiklas Söderlund * @size: size of resource to map 194324ed5d2cSNiklas Söderlund * @dir: DMA transfer direction 194424ed5d2cSNiklas Söderlund */ 194524ed5d2cSNiklas Söderlund static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle, 194624ed5d2cSNiklas Söderlund size_t size, enum dma_data_direction dir, 194724ed5d2cSNiklas Söderlund unsigned long attrs) 194824ed5d2cSNiklas Söderlund { 194924ed5d2cSNiklas Söderlund struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 195024ed5d2cSNiklas Söderlund dma_addr_t iova = dma_handle & PAGE_MASK; 195124ed5d2cSNiklas Söderlund unsigned int offset = dma_handle & ~PAGE_MASK; 195224ed5d2cSNiklas Söderlund size_t len = PAGE_ALIGN(size + offset); 195324ed5d2cSNiklas Söderlund 195424ed5d2cSNiklas Söderlund if (!iova) 195524ed5d2cSNiklas Söderlund return; 195624ed5d2cSNiklas Söderlund 195724ed5d2cSNiklas Söderlund iommu_unmap(mapping->domain, iova, len); 195824ed5d2cSNiklas Söderlund __free_iova(mapping, iova, len); 195924ed5d2cSNiklas Söderlund } 196024ed5d2cSNiklas Söderlund 19614ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev, 19624ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 19634ce63fcdSMarek Szyprowski { 196489cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19654ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 19664ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19674ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 19684ce63fcdSMarek Szyprowski 19694ce63fcdSMarek Szyprowski if (!iova) 19704ce63fcdSMarek Szyprowski return; 19714ce63fcdSMarek Szyprowski 19724ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 19734ce63fcdSMarek Szyprowski } 19744ce63fcdSMarek Szyprowski 19754ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev, 19764ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 19774ce63fcdSMarek Szyprowski { 197889cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19794ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 19804ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19814ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 19824ce63fcdSMarek Szyprowski 19834ce63fcdSMarek Szyprowski if (!iova) 19844ce63fcdSMarek Szyprowski return; 19854ce63fcdSMarek Szyprowski 19864ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 19874ce63fcdSMarek Szyprowski } 19884ce63fcdSMarek Szyprowski 198917fe8684SBen Dooks static const struct dma_map_ops iommu_ops = { 19904ce63fcdSMarek Szyprowski .alloc = arm_iommu_alloc_attrs, 19914ce63fcdSMarek Szyprowski .free = arm_iommu_free_attrs, 19924ce63fcdSMarek Szyprowski .mmap = arm_iommu_mmap_attrs, 1993dc2832e1SMarek Szyprowski .get_sgtable = arm_iommu_get_sgtable, 19944ce63fcdSMarek Szyprowski 19954ce63fcdSMarek Szyprowski .map_page = arm_iommu_map_page, 19964ce63fcdSMarek Szyprowski .unmap_page = arm_iommu_unmap_page, 19974ce63fcdSMarek Szyprowski .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, 19984ce63fcdSMarek Szyprowski .sync_single_for_device = arm_iommu_sync_single_for_device, 19994ce63fcdSMarek Szyprowski 20004ce63fcdSMarek Szyprowski .map_sg = arm_iommu_map_sg, 20014ce63fcdSMarek Szyprowski .unmap_sg = arm_iommu_unmap_sg, 
20024ce63fcdSMarek Szyprowski .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 20034ce63fcdSMarek Szyprowski .sync_sg_for_device = arm_iommu_sync_sg_for_device, 200424ed5d2cSNiklas Söderlund 200524ed5d2cSNiklas Söderlund .map_resource = arm_iommu_map_resource, 200624ed5d2cSNiklas Söderlund .unmap_resource = arm_iommu_unmap_resource, 20079eef8b8cSChristoph Hellwig 2008418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 20270fa478dfSRob Herring }; 20280fa478dfSRob Herring 20294ce63fcdSMarek Szyprowski /** 20304ce63fcdSMarek Szyprowski * arm_iommu_create_mapping 20314ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls) 20324ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space 203368efd7d2SMarek Szyprowski * @size: maximum size of the valid IO address space 20344ce63fcdSMarek Szyprowski * 20354ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused 20364ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and 20374ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions. 20384ce63fcdSMarek Szyprowski * 20394ce63fcdSMarek Szyprowski * The client device needs to be attached to the mapping with 20404ce63fcdSMarek Szyprowski * the arm_iommu_attach_device() function.
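 *
 * Returns the new mapping on success, or an ERR_PTR() value on failure
 * (for example -ENOMEM, -EINVAL for a zero-sized window, or -ERANGE for
 * a window larger than 4 GiB); callers are expected to check the result
 * with IS_ERR().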
20414ce63fcdSMarek Szyprowski */ 20424ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 20431424532bSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) 20444ce63fcdSMarek Szyprowski { 204568efd7d2SMarek Szyprowski unsigned int bits = size >> PAGE_SHIFT; 204668efd7d2SMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); 20474ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 204868efd7d2SMarek Szyprowski int extensions = 1; 20494ce63fcdSMarek Szyprowski int err = -ENOMEM; 20504ce63fcdSMarek Szyprowski 20511424532bSMarek Szyprowski /* currently only 32-bit DMA address space is supported */ 20521424532bSMarek Szyprowski if (size > DMA_BIT_MASK(32) + 1) 20531424532bSMarek Szyprowski return ERR_PTR(-ERANGE); 20541424532bSMarek Szyprowski 205568efd7d2SMarek Szyprowski if (!bitmap_size) 20564ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 20574ce63fcdSMarek Szyprowski 205868efd7d2SMarek Szyprowski if (bitmap_size > PAGE_SIZE) { 205968efd7d2SMarek Szyprowski extensions = bitmap_size / PAGE_SIZE; 206068efd7d2SMarek Szyprowski bitmap_size = PAGE_SIZE; 206168efd7d2SMarek Szyprowski } 206268efd7d2SMarek Szyprowski 20634ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 20644ce63fcdSMarek Szyprowski if (!mapping) 20654ce63fcdSMarek Szyprowski goto err; 20664ce63fcdSMarek Szyprowski 206768efd7d2SMarek Szyprowski mapping->bitmap_size = bitmap_size; 20686396bb22SKees Cook mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), 20694d852ef8SAndreas Herrmann GFP_KERNEL); 20704d852ef8SAndreas Herrmann if (!mapping->bitmaps) 20714ce63fcdSMarek Szyprowski goto err2; 20724ce63fcdSMarek Szyprowski 207368efd7d2SMarek Szyprowski mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); 20744d852ef8SAndreas Herrmann if (!mapping->bitmaps[0]) 20754d852ef8SAndreas Herrmann goto err3; 20764d852ef8SAndreas Herrmann 20774d852ef8SAndreas Herrmann mapping->nr_bitmaps = 1; 20784d852ef8SAndreas Herrmann mapping->extensions = extensions; 20794ce63fcdSMarek Szyprowski mapping->base = base; 208068efd7d2SMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 20814d852ef8SAndreas Herrmann 20824ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 20834ce63fcdSMarek Szyprowski 20844ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 20854ce63fcdSMarek Szyprowski if (!mapping->domain) 20864d852ef8SAndreas Herrmann goto err4; 20874ce63fcdSMarek Szyprowski 20884ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 20894ce63fcdSMarek Szyprowski return mapping; 20904d852ef8SAndreas Herrmann err4: 20914d852ef8SAndreas Herrmann kfree(mapping->bitmaps[0]); 20924ce63fcdSMarek Szyprowski err3: 20934d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 20944ce63fcdSMarek Szyprowski err2: 20954ce63fcdSMarek Szyprowski kfree(mapping); 20964ce63fcdSMarek Szyprowski err: 20974ce63fcdSMarek Szyprowski return ERR_PTR(err); 20984ce63fcdSMarek Szyprowski } 209918177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); 21004ce63fcdSMarek Szyprowski 21014ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 21024ce63fcdSMarek Szyprowski { 21034d852ef8SAndreas Herrmann int i; 21044ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 21054ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 21064ce63fcdSMarek Szyprowski 21074ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 21084d852ef8SAndreas Herrmann for (i = 0; i < 
mapping->nr_bitmaps; i++) 21094d852ef8SAndreas Herrmann kfree(mapping->bitmaps[i]); 21104d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 21114ce63fcdSMarek Szyprowski kfree(mapping); 21124ce63fcdSMarek Szyprowski } 21134ce63fcdSMarek Szyprowski 21144d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping) 21154d852ef8SAndreas Herrmann { 21164d852ef8SAndreas Herrmann int next_bitmap; 21174d852ef8SAndreas Herrmann 2118462859aaSMarek Szyprowski if (mapping->nr_bitmaps >= mapping->extensions) 21194d852ef8SAndreas Herrmann return -EINVAL; 21204d852ef8SAndreas Herrmann 21214d852ef8SAndreas Herrmann next_bitmap = mapping->nr_bitmaps; 21224d852ef8SAndreas Herrmann mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, 21234d852ef8SAndreas Herrmann GFP_ATOMIC); 21244d852ef8SAndreas Herrmann if (!mapping->bitmaps[next_bitmap]) 21254d852ef8SAndreas Herrmann return -ENOMEM; 21264d852ef8SAndreas Herrmann 21274d852ef8SAndreas Herrmann mapping->nr_bitmaps++; 21284d852ef8SAndreas Herrmann 21294d852ef8SAndreas Herrmann return 0; 21304d852ef8SAndreas Herrmann } 21314d852ef8SAndreas Herrmann 21324ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) 21334ce63fcdSMarek Szyprowski { 21344ce63fcdSMarek Szyprowski if (mapping) 21354ce63fcdSMarek Szyprowski kref_put(&mapping->kref, release_iommu_mapping); 21364ce63fcdSMarek Szyprowski } 213718177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); 21384ce63fcdSMarek Szyprowski 2139eab8d653SLaurent Pinchart static int __arm_iommu_attach_device(struct device *dev, 21404ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping) 21414ce63fcdSMarek Szyprowski { 21424ce63fcdSMarek Szyprowski int err; 21434ce63fcdSMarek Szyprowski 21444ce63fcdSMarek Szyprowski err = iommu_attach_device(mapping->domain, dev); 21454ce63fcdSMarek Szyprowski if (err) 21464ce63fcdSMarek Szyprowski return err; 21474ce63fcdSMarek Szyprowski 21484ce63fcdSMarek Szyprowski kref_get(&mapping->kref); 214989cfdb19SWill Deacon to_dma_iommu_mapping(dev) = mapping; 21504ce63fcdSMarek Szyprowski 215175c59716SHiroshi Doyu pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); 21524ce63fcdSMarek Szyprowski return 0; 21534ce63fcdSMarek Szyprowski } 21544ce63fcdSMarek Szyprowski 21556fe36758SHiroshi Doyu /** 2156eab8d653SLaurent Pinchart * arm_iommu_attach_device 21576fe36758SHiroshi Doyu * @dev: valid struct device pointer 2158eab8d653SLaurent Pinchart * @mapping: io address space mapping structure (returned from 2159eab8d653SLaurent Pinchart * arm_iommu_create_mapping) 21606fe36758SHiroshi Doyu * 2161eab8d653SLaurent Pinchart * Attaches specified io address space mapping to the provided device. 2162eab8d653SLaurent Pinchart * This replaces the dma operations (dma_map_ops pointer) with the 2163eab8d653SLaurent Pinchart * IOMMU aware version. 2164eab8d653SLaurent Pinchart * 2165eab8d653SLaurent Pinchart * More than one client might be attached to the same io address space 2166eab8d653SLaurent Pinchart * mapping. 
21676fe36758SHiroshi Doyu */ 2168eab8d653SLaurent Pinchart int arm_iommu_attach_device(struct device *dev, 2169eab8d653SLaurent Pinchart struct dma_iommu_mapping *mapping) 2170eab8d653SLaurent Pinchart { 2171eab8d653SLaurent Pinchart int err; 2172eab8d653SLaurent Pinchart 2173eab8d653SLaurent Pinchart err = __arm_iommu_attach_device(dev, mapping); 2174eab8d653SLaurent Pinchart if (err) 2175eab8d653SLaurent Pinchart return err; 2176eab8d653SLaurent Pinchart 2177eab8d653SLaurent Pinchart set_dma_ops(dev, &iommu_ops); 2178eab8d653SLaurent Pinchart return 0; 2179eab8d653SLaurent Pinchart } 2180eab8d653SLaurent Pinchart EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 2181eab8d653SLaurent Pinchart 2182d3e01c51SSricharan R /** 2183d3e01c51SSricharan R * arm_iommu_detach_device 2184d3e01c51SSricharan R * @dev: valid struct device pointer 2185d3e01c51SSricharan R * 2186d3e01c51SSricharan R * Detaches the provided device from a previously attached map. 21874a4d68fcSWolfram Sang (Renesas) * This overwrites the dma_ops pointer with appropriate non-IOMMU ops. 2188d3e01c51SSricharan R */ 2189d3e01c51SSricharan R void arm_iommu_detach_device(struct device *dev) 21906fe36758SHiroshi Doyu { 21916fe36758SHiroshi Doyu struct dma_iommu_mapping *mapping; 21926fe36758SHiroshi Doyu 21936fe36758SHiroshi Doyu mapping = to_dma_iommu_mapping(dev); 21946fe36758SHiroshi Doyu if (!mapping) { 21956fe36758SHiroshi Doyu dev_warn(dev, "Not attached\n"); 21966fe36758SHiroshi Doyu return; 21976fe36758SHiroshi Doyu } 21986fe36758SHiroshi Doyu 21996fe36758SHiroshi Doyu iommu_detach_device(mapping->domain, dev); 22006fe36758SHiroshi Doyu kref_put(&mapping->kref, release_iommu_mapping); 220189cfdb19SWill Deacon to_dma_iommu_mapping(dev) = NULL; 22021874619aSThierry Reding set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent)); 22036fe36758SHiroshi Doyu 22046fe36758SHiroshi Doyu pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 22056fe36758SHiroshi Doyu } 220618177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 22076fe36758SHiroshi Doyu 22085299709dSBart Van Assche static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) 22094bb25789SWill Deacon { 22104bb25789SWill Deacon return coherent ? 
22085299709dSBart Van Assche static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
22094bb25789SWill Deacon {
22104bb25789SWill Deacon 	return coherent ? &iommu_coherent_ops : &iommu_ops;
22114bb25789SWill Deacon }
22124bb25789SWill Deacon 
22134bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
221453c92d79SRobin Murphy 				    const struct iommu_ops *iommu)
22154bb25789SWill Deacon {
22164bb25789SWill Deacon 	struct dma_iommu_mapping *mapping;
22174bb25789SWill Deacon 
22184bb25789SWill Deacon 	if (!iommu)
22194bb25789SWill Deacon 		return false;
22204bb25789SWill Deacon 
22214bb25789SWill Deacon 	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
22224bb25789SWill Deacon 	if (IS_ERR(mapping)) {
22234bb25789SWill Deacon 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
22244bb25789SWill Deacon 			size, dev_name(dev));
22254bb25789SWill Deacon 		return false;
22264bb25789SWill Deacon 	}
22274bb25789SWill Deacon 
2228eab8d653SLaurent Pinchart 	if (__arm_iommu_attach_device(dev, mapping)) {
22294bb25789SWill Deacon 		pr_warn("Failed to attach device %s to IOMMU mapping\n",
22304bb25789SWill Deacon 			dev_name(dev));
22314bb25789SWill Deacon 		arm_iommu_release_mapping(mapping);
22324bb25789SWill Deacon 		return false;
22334bb25789SWill Deacon 	}
22344bb25789SWill Deacon 
22354bb25789SWill Deacon 	return true;
22364bb25789SWill Deacon }
22374bb25789SWill Deacon 
22384bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev)
22394bb25789SWill Deacon {
224089cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
22414bb25789SWill Deacon 
2242c2273a18SWill Deacon 	if (!mapping)
2243c2273a18SWill Deacon 		return;
2244c2273a18SWill Deacon 
2245d3e01c51SSricharan R 	arm_iommu_detach_device(dev);
22464bb25789SWill Deacon 	arm_iommu_release_mapping(mapping);
22474bb25789SWill Deacon }
22484bb25789SWill Deacon 
22494bb25789SWill Deacon #else
22504bb25789SWill Deacon 
22514bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
225253c92d79SRobin Murphy 				    const struct iommu_ops *iommu)
22534bb25789SWill Deacon {
22544bb25789SWill Deacon 	return false;
22554bb25789SWill Deacon }
22564bb25789SWill Deacon 
22574bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) { }
22584bb25789SWill Deacon 
22594bb25789SWill Deacon #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
22604bb25789SWill Deacon 
22614bb25789SWill Deacon #endif /* CONFIG_ARM_DMA_USE_IOMMU */
22624bb25789SWill Deacon 
22634bb25789SWill Deacon void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
226453c92d79SRobin Murphy 			const struct iommu_ops *iommu, bool coherent)
22654bb25789SWill Deacon {
22665299709dSBart Van Assche 	const struct dma_map_ops *dma_ops;
22674bb25789SWill Deacon 
22686f51ee70SLinus Torvalds 	dev->archdata.dma_coherent = coherent;
2269ad3c7b18SChristoph Hellwig #ifdef CONFIG_SWIOTLB
2270ad3c7b18SChristoph Hellwig 	dev->dma_coherent = coherent;
2271ad3c7b18SChristoph Hellwig #endif
227226b37b94SLaurent Pinchart 
227326b37b94SLaurent Pinchart 	/*
227426b37b94SLaurent Pinchart 	 * Don't override the dma_ops if they have already been set. Ideally
227526b37b94SLaurent Pinchart 	 * this should be the only location where dma_ops are set; remove this
227626b37b94SLaurent Pinchart 	 * check when all other callers of set_dma_ops() have disappeared.
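	 *
	 * Illustrative lifecycle (the deferred-probe scenario is hypothetical;
	 * the helpers are the ones defined in this file):
	 *
	 *	arch_setup_dma_ops(dev, dma_base, size, iommu, coherent);
	 *	arch_teardown_dma_ops(dev);
	 *	arch_setup_dma_ops(dev, dma_base, size, iommu, coherent);
	 *
	 * arch_teardown_dma_ops() resets dev->dma_ops to NULL, so the second
	 * setup is not stopped by the early return below; only ops installed
	 * by some other caller of set_dma_ops() are left untouched.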
227726b37b94SLaurent Pinchart 	 */
227826b37b94SLaurent Pinchart 	if (dev->dma_ops)
227926b37b94SLaurent Pinchart 		return;
228026b37b94SLaurent Pinchart 
22814bb25789SWill Deacon 	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
22824bb25789SWill Deacon 		dma_ops = arm_get_iommu_dma_map_ops(coherent);
22834bb25789SWill Deacon 	else
22844bb25789SWill Deacon 		dma_ops = arm_get_dma_map_ops(coherent);
22854bb25789SWill Deacon 
22864bb25789SWill Deacon 	set_dma_ops(dev, dma_ops);
2287e0586326SStefano Stabellini 
2288e0586326SStefano Stabellini #ifdef CONFIG_XEN
22898e23c82cSChristoph Hellwig 	if (xen_initial_domain())
22900e0d26e7SChristoph Hellwig 		dev->dma_ops = &xen_swiotlb_dma_ops;
2291e0586326SStefano Stabellini #endif
2292a93a121aSLaurent Pinchart 	dev->archdata.dma_ops_setup = true;
22934bb25789SWill Deacon }
22944bb25789SWill Deacon 
22954bb25789SWill Deacon void arch_teardown_dma_ops(struct device *dev)
22964bb25789SWill Deacon {
2297a93a121aSLaurent Pinchart 	if (!dev->archdata.dma_ops_setup)
2298a93a121aSLaurent Pinchart 		return;
2299a93a121aSLaurent Pinchart 
23004bb25789SWill Deacon 	arm_teardown_iommu_dma_ops(dev);
2301fc67e6f1SRobin Murphy 	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2302fc67e6f1SRobin Murphy 	set_dma_ops(dev, NULL);
23034bb25789SWill Deacon }
2304ad3c7b18SChristoph Hellwig 
2305ad3c7b18SChristoph Hellwig #ifdef CONFIG_SWIOTLB
230656e35f9cSChristoph Hellwig void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
230756e35f9cSChristoph Hellwig 		enum dma_data_direction dir)
2308ad3c7b18SChristoph Hellwig {
2309ad3c7b18SChristoph Hellwig 	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2310ad3c7b18SChristoph Hellwig 			      size, dir);
2311ad3c7b18SChristoph Hellwig }
2312ad3c7b18SChristoph Hellwig 
231356e35f9cSChristoph Hellwig void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
231456e35f9cSChristoph Hellwig 		enum dma_data_direction dir)
2315ad3c7b18SChristoph Hellwig {
2316ad3c7b18SChristoph Hellwig 	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2317ad3c7b18SChristoph Hellwig 			      size, dir);
2318ad3c7b18SChristoph Hellwig }
2319ad3c7b18SChristoph Hellwig 
2320ad3c7b18SChristoph Hellwig void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
2321ad3c7b18SChristoph Hellwig 		gfp_t gfp, unsigned long attrs)
2322ad3c7b18SChristoph Hellwig {
2323ad3c7b18SChristoph Hellwig 	return __dma_alloc(dev, size, dma_handle, gfp,
2324ad3c7b18SChristoph Hellwig 			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
2325ad3c7b18SChristoph Hellwig 			   attrs, __builtin_return_address(0));
2326ad3c7b18SChristoph Hellwig }
2327ad3c7b18SChristoph Hellwig 
2328ad3c7b18SChristoph Hellwig void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
2329ad3c7b18SChristoph Hellwig 		dma_addr_t dma_handle, unsigned long attrs)
2330ad3c7b18SChristoph Hellwig {
2331ad3c7b18SChristoph Hellwig 	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
2332ad3c7b18SChristoph Hellwig }
2333ad3c7b18SChristoph Hellwig #endif /* CONFIG_SWIOTLB */
2334
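/*
 * A hedged driver-side sketch of the streaming path served by the
 * arch_sync_dma_* helpers above ("dev", "buf" and "len" are illustrative,
 * and this only applies to configurations where the device is routed
 * through the dma-direct/swiotlb ops rather than arm_dma_ops):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	(... device writes into the buffer ...)
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *
 * For a non-coherent device the map step ends up in
 * arch_sync_dma_for_device() and the unmap step in arch_sync_dma_for_cpu(),
 * which perform the CPU cache maintenance via __dma_page_cpu_to_dev() and
 * __dma_page_dev_to_cpu().
 */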