10ddbccd1SRussell King /* 20ddbccd1SRussell King * linux/arch/arm/mm/dma-mapping.c 30ddbccd1SRussell King * 40ddbccd1SRussell King * Copyright (C) 2000-2004 Russell King 50ddbccd1SRussell King * 60ddbccd1SRussell King * This program is free software; you can redistribute it and/or modify 70ddbccd1SRussell King * it under the terms of the GNU General Public License version 2 as 80ddbccd1SRussell King * published by the Free Software Foundation. 90ddbccd1SRussell King * 100ddbccd1SRussell King * DMA uncached mapping support. 110ddbccd1SRussell King */ 120ddbccd1SRussell King #include <linux/module.h> 130ddbccd1SRussell King #include <linux/mm.h> 145a0e3ad6STejun Heo #include <linux/gfp.h> 150ddbccd1SRussell King #include <linux/errno.h> 160ddbccd1SRussell King #include <linux/list.h> 170ddbccd1SRussell King #include <linux/init.h> 180ddbccd1SRussell King #include <linux/device.h> 190ddbccd1SRussell King #include <linux/dma-mapping.h> 20c7909509SMarek Szyprowski #include <linux/dma-contiguous.h> 2139af22a7SNicolas Pitre #include <linux/highmem.h> 22c7909509SMarek Szyprowski #include <linux/memblock.h> 2399d1717dSJon Medhurst #include <linux/slab.h> 244ce63fcdSMarek Szyprowski #include <linux/iommu.h> 25e9da6e99SMarek Szyprowski #include <linux/io.h> 264ce63fcdSMarek Szyprowski #include <linux/vmalloc.h> 27158e8bfeSAlessandro Rubini #include <linux/sizes.h> 280ddbccd1SRussell King 290ddbccd1SRussell King #include <asm/memory.h> 3043377453SNicolas Pitre #include <asm/highmem.h> 310ddbccd1SRussell King #include <asm/cacheflush.h> 320ddbccd1SRussell King #include <asm/tlbflush.h> 3399d1717dSJon Medhurst #include <asm/mach/arch.h> 344ce63fcdSMarek Szyprowski #include <asm/dma-iommu.h> 35c7909509SMarek Szyprowski #include <asm/mach/map.h> 36c7909509SMarek Szyprowski #include <asm/system_info.h> 37c7909509SMarek Szyprowski #include <asm/dma-contiguous.h> 380ddbccd1SRussell King 39022ae537SRussell King #include "mm.h" 40022ae537SRussell King 4115237e1fSMarek Szyprowski /* 
4215237e1fSMarek Szyprowski * The DMA API is built upon the notion of "buffer ownership". A buffer 4315237e1fSMarek Szyprowski * is either exclusively owned by the CPU (and therefore may be accessed 4415237e1fSMarek Szyprowski * by it) or exclusively owned by the DMA device. These helper functions 4515237e1fSMarek Szyprowski * represent the transitions between these two ownership states. 4615237e1fSMarek Szyprowski * 4715237e1fSMarek Szyprowski * Note, however, that on later ARMs, this notion does not work due to 4815237e1fSMarek Szyprowski * speculative prefetches. We model our approach on the assumption that 4915237e1fSMarek Szyprowski * the CPU does do speculative prefetches, which means we clean caches 5015237e1fSMarek Szyprowski * before transfers and delay cache invalidation until transfer completion. 5115237e1fSMarek Szyprowski * 5215237e1fSMarek Szyprowski */ 5351fde349SMarek Szyprowski static void __dma_page_cpu_to_dev(struct page *, unsigned long, 5415237e1fSMarek Szyprowski size_t, enum dma_data_direction); 5551fde349SMarek Szyprowski static void __dma_page_dev_to_cpu(struct page *, unsigned long, 5615237e1fSMarek Szyprowski size_t, enum dma_data_direction); 5715237e1fSMarek Szyprowski 582dc6a016SMarek Szyprowski /** 592dc6a016SMarek Szyprowski * arm_dma_map_page - map a portion of a page for streaming DMA 602dc6a016SMarek Szyprowski * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 612dc6a016SMarek Szyprowski * @page: page that buffer resides in 622dc6a016SMarek Szyprowski * @offset: offset into page for start of buffer 632dc6a016SMarek Szyprowski * @size: size of buffer to map 642dc6a016SMarek Szyprowski * @dir: DMA transfer direction 652dc6a016SMarek Szyprowski * 662dc6a016SMarek Szyprowski * Ensure that any data held in the cache is appropriately discarded 672dc6a016SMarek Szyprowski * or written back. 682dc6a016SMarek Szyprowski * 692dc6a016SMarek Szyprowski * The device owns this memory once this call has completed. 
The CPU 702dc6a016SMarek Szyprowski * can regain ownership by calling dma_unmap_page(). 712dc6a016SMarek Szyprowski */ 7251fde349SMarek Szyprowski static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, 732dc6a016SMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 742dc6a016SMarek Szyprowski struct dma_attrs *attrs) 752dc6a016SMarek Szyprowski { 76dd37e940SRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 7751fde349SMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 7851fde349SMarek Szyprowski return pfn_to_dma(dev, page_to_pfn(page)) + offset; 792dc6a016SMarek Szyprowski } 802dc6a016SMarek Szyprowski 81dd37e940SRob Herring static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, 82dd37e940SRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 83dd37e940SRob Herring struct dma_attrs *attrs) 84dd37e940SRob Herring { 85dd37e940SRob Herring return pfn_to_dma(dev, page_to_pfn(page)) + offset; 86dd37e940SRob Herring } 87dd37e940SRob Herring 882dc6a016SMarek Szyprowski /** 892dc6a016SMarek Szyprowski * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page() 902dc6a016SMarek Szyprowski * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 912dc6a016SMarek Szyprowski * @handle: DMA address of buffer 922dc6a016SMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 932dc6a016SMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 942dc6a016SMarek Szyprowski * 952dc6a016SMarek Szyprowski * Unmap a page streaming mode DMA translation. The handle and size 962dc6a016SMarek Szyprowski * must match what was provided in the previous dma_map_page() call. 972dc6a016SMarek Szyprowski * All other usages are undefined. 
982dc6a016SMarek Szyprowski * 992dc6a016SMarek Szyprowski * After this call, reads by the CPU to the buffer are guaranteed to see 1002dc6a016SMarek Szyprowski * whatever the device wrote there. 1012dc6a016SMarek Szyprowski */ 10251fde349SMarek Szyprowski static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, 1032dc6a016SMarek Szyprowski size_t size, enum dma_data_direction dir, 1042dc6a016SMarek Szyprowski struct dma_attrs *attrs) 1052dc6a016SMarek Szyprowski { 106dd37e940SRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 10751fde349SMarek Szyprowski __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), 10851fde349SMarek Szyprowski handle & ~PAGE_MASK, size, dir); 1092dc6a016SMarek Szyprowski } 1102dc6a016SMarek Szyprowski 11151fde349SMarek Szyprowski static void arm_dma_sync_single_for_cpu(struct device *dev, 1122dc6a016SMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 1132dc6a016SMarek Szyprowski { 1142dc6a016SMarek Szyprowski unsigned int offset = handle & (PAGE_SIZE - 1); 1152dc6a016SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); 1162dc6a016SMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 1172dc6a016SMarek Szyprowski } 1182dc6a016SMarek Szyprowski 11951fde349SMarek Szyprowski static void arm_dma_sync_single_for_device(struct device *dev, 1202dc6a016SMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 1212dc6a016SMarek Szyprowski { 1222dc6a016SMarek Szyprowski unsigned int offset = handle & (PAGE_SIZE - 1); 1232dc6a016SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); 1242dc6a016SMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 1252dc6a016SMarek Szyprowski } 1262dc6a016SMarek Szyprowski 1272dc6a016SMarek Szyprowski struct dma_map_ops arm_dma_ops = { 128f99d6034SMarek Szyprowski .alloc = arm_dma_alloc, 129f99d6034SMarek Szyprowski .free = arm_dma_free, 130f99d6034SMarek 
Szyprowski .mmap = arm_dma_mmap, 131dc2832e1SMarek Szyprowski .get_sgtable = arm_dma_get_sgtable, 1322dc6a016SMarek Szyprowski .map_page = arm_dma_map_page, 1332dc6a016SMarek Szyprowski .unmap_page = arm_dma_unmap_page, 1342dc6a016SMarek Szyprowski .map_sg = arm_dma_map_sg, 1352dc6a016SMarek Szyprowski .unmap_sg = arm_dma_unmap_sg, 1362dc6a016SMarek Szyprowski .sync_single_for_cpu = arm_dma_sync_single_for_cpu, 1372dc6a016SMarek Szyprowski .sync_single_for_device = arm_dma_sync_single_for_device, 1382dc6a016SMarek Szyprowski .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, 1392dc6a016SMarek Szyprowski .sync_sg_for_device = arm_dma_sync_sg_for_device, 1402dc6a016SMarek Szyprowski .set_dma_mask = arm_dma_set_mask, 1412dc6a016SMarek Szyprowski }; 1422dc6a016SMarek Szyprowski EXPORT_SYMBOL(arm_dma_ops); 1432dc6a016SMarek Szyprowski 144dd37e940SRob Herring static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 145dd37e940SRob Herring dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); 146dd37e940SRob Herring static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, 147dd37e940SRob Herring dma_addr_t handle, struct dma_attrs *attrs); 148dd37e940SRob Herring 149dd37e940SRob Herring struct dma_map_ops arm_coherent_dma_ops = { 150dd37e940SRob Herring .alloc = arm_coherent_dma_alloc, 151dd37e940SRob Herring .free = arm_coherent_dma_free, 152dd37e940SRob Herring .mmap = arm_dma_mmap, 153dd37e940SRob Herring .get_sgtable = arm_dma_get_sgtable, 154dd37e940SRob Herring .map_page = arm_coherent_dma_map_page, 155dd37e940SRob Herring .map_sg = arm_dma_map_sg, 156dd37e940SRob Herring .set_dma_mask = arm_dma_set_mask, 157dd37e940SRob Herring }; 158dd37e940SRob Herring EXPORT_SYMBOL(arm_coherent_dma_ops); 159dd37e940SRob Herring 160ab6494f0SCatalin Marinas static u64 get_coherent_dma_mask(struct device *dev) 161ab6494f0SCatalin Marinas { 162022ae537SRussell King u64 mask = (u64)arm_dma_limit; 1630ddbccd1SRussell King 164ab6494f0SCatalin Marinas 
if (dev) { 165ab6494f0SCatalin Marinas mask = dev->coherent_dma_mask; 166ab6494f0SCatalin Marinas 167ab6494f0SCatalin Marinas /* 168ab6494f0SCatalin Marinas * Sanity check the DMA mask - it must be non-zero, and 169ab6494f0SCatalin Marinas * must be able to be satisfied by a DMA allocation. 170ab6494f0SCatalin Marinas */ 171ab6494f0SCatalin Marinas if (mask == 0) { 172ab6494f0SCatalin Marinas dev_warn(dev, "coherent DMA mask is unset\n"); 173ab6494f0SCatalin Marinas return 0; 174ab6494f0SCatalin Marinas } 175ab6494f0SCatalin Marinas 176022ae537SRussell King if ((~mask) & (u64)arm_dma_limit) { 177ab6494f0SCatalin Marinas dev_warn(dev, "coherent DMA mask %#llx is smaller " 178ab6494f0SCatalin Marinas "than system GFP_DMA mask %#llx\n", 179022ae537SRussell King mask, (u64)arm_dma_limit); 180ab6494f0SCatalin Marinas return 0; 181ab6494f0SCatalin Marinas } 182ab6494f0SCatalin Marinas } 183ab6494f0SCatalin Marinas 184ab6494f0SCatalin Marinas return mask; 185ab6494f0SCatalin Marinas } 186ab6494f0SCatalin Marinas 187c7909509SMarek Szyprowski static void __dma_clear_buffer(struct page *page, size_t size) 188c7909509SMarek Szyprowski { 189c7909509SMarek Szyprowski /* 190c7909509SMarek Szyprowski * Ensure that the allocated pages are zeroed, and that any data 191c7909509SMarek Szyprowski * lurking in the kernel direct-mapped region is invalidated. 
192c7909509SMarek Szyprowski */ 1939848e48fSMarek Szyprowski if (PageHighMem(page)) { 1949848e48fSMarek Szyprowski phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); 1959848e48fSMarek Szyprowski phys_addr_t end = base + size; 1969848e48fSMarek Szyprowski while (size > 0) { 1979848e48fSMarek Szyprowski void *ptr = kmap_atomic(page); 1989848e48fSMarek Szyprowski memset(ptr, 0, PAGE_SIZE); 1999848e48fSMarek Szyprowski dmac_flush_range(ptr, ptr + PAGE_SIZE); 2009848e48fSMarek Szyprowski kunmap_atomic(ptr); 2019848e48fSMarek Szyprowski page++; 2029848e48fSMarek Szyprowski size -= PAGE_SIZE; 2039848e48fSMarek Szyprowski } 2049848e48fSMarek Szyprowski outer_flush_range(base, end); 2059848e48fSMarek Szyprowski } else { 2069848e48fSMarek Szyprowski void *ptr = page_address(page); 207c7909509SMarek Szyprowski memset(ptr, 0, size); 208c7909509SMarek Szyprowski dmac_flush_range(ptr, ptr + size); 209c7909509SMarek Szyprowski outer_flush_range(__pa(ptr), __pa(ptr) + size); 210c7909509SMarek Szyprowski } 2114ce63fcdSMarek Szyprowski } 212c7909509SMarek Szyprowski 2137a9a32a9SRussell King /* 2147a9a32a9SRussell King * Allocate a DMA buffer for 'dev' of size 'size' using the 2157a9a32a9SRussell King * specified gfp mask. Note that 'size' must be page aligned. 
2167a9a32a9SRussell King */ 2177a9a32a9SRussell King static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) 2187a9a32a9SRussell King { 2197a9a32a9SRussell King unsigned long order = get_order(size); 2207a9a32a9SRussell King struct page *page, *p, *e; 2217a9a32a9SRussell King 2227a9a32a9SRussell King page = alloc_pages(gfp, order); 2237a9a32a9SRussell King if (!page) 2247a9a32a9SRussell King return NULL; 2257a9a32a9SRussell King 2267a9a32a9SRussell King /* 2277a9a32a9SRussell King * Now split the huge page and free the excess pages 2287a9a32a9SRussell King */ 2297a9a32a9SRussell King split_page(page, order); 2307a9a32a9SRussell King for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) 2317a9a32a9SRussell King __free_page(p); 2327a9a32a9SRussell King 233c7909509SMarek Szyprowski __dma_clear_buffer(page, size); 2347a9a32a9SRussell King 2357a9a32a9SRussell King return page; 2367a9a32a9SRussell King } 2377a9a32a9SRussell King 2387a9a32a9SRussell King /* 2397a9a32a9SRussell King * Free a DMA buffer. 'size' must be page aligned. 
2407a9a32a9SRussell King */ 2417a9a32a9SRussell King static void __dma_free_buffer(struct page *page, size_t size) 2427a9a32a9SRussell King { 2437a9a32a9SRussell King struct page *e = page + (size >> PAGE_SHIFT); 2447a9a32a9SRussell King 2457a9a32a9SRussell King while (page < e) { 2467a9a32a9SRussell King __free_page(page); 2477a9a32a9SRussell King page++; 2487a9a32a9SRussell King } 2497a9a32a9SRussell King } 2507a9a32a9SRussell King 251ab6494f0SCatalin Marinas #ifdef CONFIG_MMU 2520ddbccd1SRussell King #ifdef CONFIG_HUGETLB_PAGE 2530ddbccd1SRussell King #error ARM Coherent DMA allocator does not (yet) support huge TLB 2540ddbccd1SRussell King #endif 2550ddbccd1SRussell King 256c7909509SMarek Szyprowski static void *__alloc_from_contiguous(struct device *dev, size_t size, 2579848e48fSMarek Szyprowski pgprot_t prot, struct page **ret_page, 2589848e48fSMarek Szyprowski const void *caller); 259c7909509SMarek Szyprowski 260e9da6e99SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, 261e9da6e99SMarek Szyprowski pgprot_t prot, struct page **ret_page, 262e9da6e99SMarek Szyprowski const void *caller); 263e9da6e99SMarek Szyprowski 264e9da6e99SMarek Szyprowski static void * 265e9da6e99SMarek Szyprowski __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, 266e9da6e99SMarek Szyprowski const void *caller) 267e9da6e99SMarek Szyprowski { 268e9da6e99SMarek Szyprowski struct vm_struct *area; 269e9da6e99SMarek Szyprowski unsigned long addr; 270e9da6e99SMarek Szyprowski 271e9da6e99SMarek Szyprowski /* 272e9da6e99SMarek Szyprowski * DMA allocation can be mapped to user space, so lets 273e9da6e99SMarek Szyprowski * set VM_USERMAP flags too. 
274e9da6e99SMarek Szyprowski */ 275e9da6e99SMarek Szyprowski area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, 276e9da6e99SMarek Szyprowski caller); 277e9da6e99SMarek Szyprowski if (!area) 278e9da6e99SMarek Szyprowski return NULL; 279e9da6e99SMarek Szyprowski addr = (unsigned long)area->addr; 280e9da6e99SMarek Szyprowski area->phys_addr = __pfn_to_phys(page_to_pfn(page)); 281e9da6e99SMarek Szyprowski 282e9da6e99SMarek Szyprowski if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) { 283e9da6e99SMarek Szyprowski vunmap((void *)addr); 284e9da6e99SMarek Szyprowski return NULL; 285e9da6e99SMarek Szyprowski } 286e9da6e99SMarek Szyprowski return (void *)addr; 287e9da6e99SMarek Szyprowski } 288e9da6e99SMarek Szyprowski 289e9da6e99SMarek Szyprowski static void __dma_free_remap(void *cpu_addr, size_t size) 290e9da6e99SMarek Szyprowski { 291e9da6e99SMarek Szyprowski unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP; 292e9da6e99SMarek Szyprowski struct vm_struct *area = find_vm_area(cpu_addr); 293e9da6e99SMarek Szyprowski if (!area || (area->flags & flags) != flags) { 294e9da6e99SMarek Szyprowski WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 295e9da6e99SMarek Szyprowski return; 296e9da6e99SMarek Szyprowski } 297e9da6e99SMarek Szyprowski unmap_kernel_range((unsigned long)cpu_addr, size); 298e9da6e99SMarek Szyprowski vunmap(cpu_addr); 299e9da6e99SMarek Szyprowski } 300e9da6e99SMarek Szyprowski 3016e5267aaSMarek Szyprowski #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K 3026e5267aaSMarek Szyprowski 303e9da6e99SMarek Szyprowski struct dma_pool { 304e9da6e99SMarek Szyprowski size_t size; 305e9da6e99SMarek Szyprowski spinlock_t lock; 306e9da6e99SMarek Szyprowski unsigned long *bitmap; 307e9da6e99SMarek Szyprowski unsigned long nr_pages; 308e9da6e99SMarek Szyprowski void *vaddr; 3096b3fe472SHiroshi Doyu struct page **pages; 310c7909509SMarek Szyprowski }; 311c7909509SMarek Szyprowski 312e9da6e99SMarek Szyprowski static struct 
dma_pool atomic_pool = { 3136e5267aaSMarek Szyprowski .size = DEFAULT_DMA_COHERENT_POOL_SIZE, 314e9da6e99SMarek Szyprowski }; 315c7909509SMarek Szyprowski 316c7909509SMarek Szyprowski static int __init early_coherent_pool(char *p) 317c7909509SMarek Szyprowski { 318e9da6e99SMarek Szyprowski atomic_pool.size = memparse(p, &p); 319c7909509SMarek Szyprowski return 0; 320c7909509SMarek Szyprowski } 321c7909509SMarek Szyprowski early_param("coherent_pool", early_coherent_pool); 322c7909509SMarek Szyprowski 3236e5267aaSMarek Szyprowski void __init init_dma_coherent_pool_size(unsigned long size) 3246e5267aaSMarek Szyprowski { 3256e5267aaSMarek Szyprowski /* 3266e5267aaSMarek Szyprowski * Catch any attempt to set the pool size too late. 3276e5267aaSMarek Szyprowski */ 3286e5267aaSMarek Szyprowski BUG_ON(atomic_pool.vaddr); 3296e5267aaSMarek Szyprowski 3306e5267aaSMarek Szyprowski /* 3316e5267aaSMarek Szyprowski * Set architecture specific coherent pool size only if 3326e5267aaSMarek Szyprowski * it has not been changed by kernel command line parameter. 3336e5267aaSMarek Szyprowski */ 3346e5267aaSMarek Szyprowski if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE) 3356e5267aaSMarek Szyprowski atomic_pool.size = size; 3366e5267aaSMarek Szyprowski } 3376e5267aaSMarek Szyprowski 338c7909509SMarek Szyprowski /* 339c7909509SMarek Szyprowski * Initialise the coherent pool for atomic allocations. 
340c7909509SMarek Szyprowski */ 341e9da6e99SMarek Szyprowski static int __init atomic_pool_init(void) 342c7909509SMarek Szyprowski { 343e9da6e99SMarek Szyprowski struct dma_pool *pool = &atomic_pool; 344c7909509SMarek Szyprowski pgprot_t prot = pgprot_dmacoherent(pgprot_kernel); 3459d1400cfSMarek Szyprowski gfp_t gfp = GFP_KERNEL | GFP_DMA; 346e9da6e99SMarek Szyprowski unsigned long nr_pages = pool->size >> PAGE_SHIFT; 347e9da6e99SMarek Szyprowski unsigned long *bitmap; 348c7909509SMarek Szyprowski struct page *page; 3496b3fe472SHiroshi Doyu struct page **pages; 350c7909509SMarek Szyprowski void *ptr; 351e9da6e99SMarek Szyprowski int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); 352c7909509SMarek Szyprowski 353e9da6e99SMarek Szyprowski bitmap = kzalloc(bitmap_size, GFP_KERNEL); 354e9da6e99SMarek Szyprowski if (!bitmap) 355e9da6e99SMarek Szyprowski goto no_bitmap; 356c7909509SMarek Szyprowski 3576b3fe472SHiroshi Doyu pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); 3586b3fe472SHiroshi Doyu if (!pages) 3596b3fe472SHiroshi Doyu goto no_pages; 3606b3fe472SHiroshi Doyu 361e9da6e99SMarek Szyprowski if (IS_ENABLED(CONFIG_CMA)) 3629848e48fSMarek Szyprowski ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, 3639848e48fSMarek Szyprowski atomic_pool_init); 364e9da6e99SMarek Szyprowski else 3659d1400cfSMarek Szyprowski ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page, 3669d1400cfSMarek Szyprowski atomic_pool_init); 367c7909509SMarek Szyprowski if (ptr) { 3686b3fe472SHiroshi Doyu int i; 3696b3fe472SHiroshi Doyu 3706b3fe472SHiroshi Doyu for (i = 0; i < nr_pages; i++) 3716b3fe472SHiroshi Doyu pages[i] = page + i; 3726b3fe472SHiroshi Doyu 373e9da6e99SMarek Szyprowski spin_lock_init(&pool->lock); 374e9da6e99SMarek Szyprowski pool->vaddr = ptr; 3756b3fe472SHiroshi Doyu pool->pages = pages; 376e9da6e99SMarek Szyprowski pool->bitmap = bitmap; 377e9da6e99SMarek Szyprowski pool->nr_pages = nr_pages; 378e9da6e99SMarek Szyprowski pr_info("DMA: 
preallocated %u KiB pool for atomic coherent allocations\n", 379e9da6e99SMarek Szyprowski (unsigned)pool->size / 1024); 380c7909509SMarek Szyprowski return 0; 381c7909509SMarek Szyprowski } 382ec10665cSSachin Kamat 383ec10665cSSachin Kamat kfree(pages); 3846b3fe472SHiroshi Doyu no_pages: 385e9da6e99SMarek Szyprowski kfree(bitmap); 386e9da6e99SMarek Szyprowski no_bitmap: 387e9da6e99SMarek Szyprowski pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", 388e9da6e99SMarek Szyprowski (unsigned)pool->size / 1024); 389c7909509SMarek Szyprowski return -ENOMEM; 390c7909509SMarek Szyprowski } 391c7909509SMarek Szyprowski /* 392c7909509SMarek Szyprowski * CMA is activated by core_initcall, so we must be called after it. 393c7909509SMarek Szyprowski */ 394e9da6e99SMarek Szyprowski postcore_initcall(atomic_pool_init); 395c7909509SMarek Szyprowski 396c7909509SMarek Szyprowski struct dma_contig_early_reserve { 397c7909509SMarek Szyprowski phys_addr_t base; 398c7909509SMarek Szyprowski unsigned long size; 399c7909509SMarek Szyprowski }; 400c7909509SMarek Szyprowski 401c7909509SMarek Szyprowski static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata; 402c7909509SMarek Szyprowski 403c7909509SMarek Szyprowski static int dma_mmu_remap_num __initdata; 404c7909509SMarek Szyprowski 405c7909509SMarek Szyprowski void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) 406c7909509SMarek Szyprowski { 407c7909509SMarek Szyprowski dma_mmu_remap[dma_mmu_remap_num].base = base; 408c7909509SMarek Szyprowski dma_mmu_remap[dma_mmu_remap_num].size = size; 409c7909509SMarek Szyprowski dma_mmu_remap_num++; 410c7909509SMarek Szyprowski } 411c7909509SMarek Szyprowski 412c7909509SMarek Szyprowski void __init dma_contiguous_remap(void) 413c7909509SMarek Szyprowski { 414c7909509SMarek Szyprowski int i; 415c7909509SMarek Szyprowski for (i = 0; i < dma_mmu_remap_num; i++) { 416c7909509SMarek Szyprowski phys_addr_t start = 
dma_mmu_remap[i].base; 417c7909509SMarek Szyprowski phys_addr_t end = start + dma_mmu_remap[i].size; 418c7909509SMarek Szyprowski struct map_desc map; 419c7909509SMarek Szyprowski unsigned long addr; 420c7909509SMarek Szyprowski 421c7909509SMarek Szyprowski if (end > arm_lowmem_limit) 422c7909509SMarek Szyprowski end = arm_lowmem_limit; 423c7909509SMarek Szyprowski if (start >= end) 42439f78e70SChris Brand continue; 425c7909509SMarek Szyprowski 426c7909509SMarek Szyprowski map.pfn = __phys_to_pfn(start); 427c7909509SMarek Szyprowski map.virtual = __phys_to_virt(start); 428c7909509SMarek Szyprowski map.length = end - start; 429c7909509SMarek Szyprowski map.type = MT_MEMORY_DMA_READY; 430c7909509SMarek Szyprowski 431c7909509SMarek Szyprowski /* 432c7909509SMarek Szyprowski * Clear previous low-memory mapping 433c7909509SMarek Szyprowski */ 434c7909509SMarek Szyprowski for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); 43561f6c7a4SVitaly Andrianov addr += PMD_SIZE) 436c7909509SMarek Szyprowski pmd_clear(pmd_off_k(addr)); 437c7909509SMarek Szyprowski 438c7909509SMarek Szyprowski iotable_init(&map, 1); 439c7909509SMarek Szyprowski } 440c7909509SMarek Szyprowski } 441c7909509SMarek Szyprowski 442c7909509SMarek Szyprowski static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr, 443c7909509SMarek Szyprowski void *data) 444c7909509SMarek Szyprowski { 445c7909509SMarek Szyprowski struct page *page = virt_to_page(addr); 446c7909509SMarek Szyprowski pgprot_t prot = *(pgprot_t *)data; 447c7909509SMarek Szyprowski 448c7909509SMarek Szyprowski set_pte_ext(pte, mk_pte(page, prot), 0); 449c7909509SMarek Szyprowski return 0; 450c7909509SMarek Szyprowski } 451c7909509SMarek Szyprowski 452c7909509SMarek Szyprowski static void __dma_remap(struct page *page, size_t size, pgprot_t prot) 453c7909509SMarek Szyprowski { 454c7909509SMarek Szyprowski unsigned long start = (unsigned long) page_address(page); 455c7909509SMarek Szyprowski unsigned end = start + 
size; 456c7909509SMarek Szyprowski 457c7909509SMarek Szyprowski apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); 458c7909509SMarek Szyprowski dsb(); 459c7909509SMarek Szyprowski flush_tlb_kernel_range(start, end); 460c7909509SMarek Szyprowski } 461c7909509SMarek Szyprowski 462c7909509SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, 463c7909509SMarek Szyprowski pgprot_t prot, struct page **ret_page, 464c7909509SMarek Szyprowski const void *caller) 465c7909509SMarek Szyprowski { 466c7909509SMarek Szyprowski struct page *page; 467c7909509SMarek Szyprowski void *ptr; 468c7909509SMarek Szyprowski page = __dma_alloc_buffer(dev, size, gfp); 469c7909509SMarek Szyprowski if (!page) 470c7909509SMarek Szyprowski return NULL; 471c7909509SMarek Szyprowski 472c7909509SMarek Szyprowski ptr = __dma_alloc_remap(page, size, gfp, prot, caller); 473c7909509SMarek Szyprowski if (!ptr) { 474c7909509SMarek Szyprowski __dma_free_buffer(page, size); 475c7909509SMarek Szyprowski return NULL; 476c7909509SMarek Szyprowski } 477c7909509SMarek Szyprowski 478c7909509SMarek Szyprowski *ret_page = page; 479c7909509SMarek Szyprowski return ptr; 480c7909509SMarek Szyprowski } 481c7909509SMarek Szyprowski 482e9da6e99SMarek Szyprowski static void *__alloc_from_pool(size_t size, struct page **ret_page) 483c7909509SMarek Szyprowski { 484e9da6e99SMarek Szyprowski struct dma_pool *pool = &atomic_pool; 485e9da6e99SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 486e9da6e99SMarek Szyprowski unsigned int pageno; 487e9da6e99SMarek Szyprowski unsigned long flags; 488e9da6e99SMarek Szyprowski void *ptr = NULL; 489e4ea6918SAaro Koskinen unsigned long align_mask; 490c7909509SMarek Szyprowski 491e9da6e99SMarek Szyprowski if (!pool->vaddr) { 492e9da6e99SMarek Szyprowski WARN(1, "coherent pool not initialised!\n"); 493c7909509SMarek Szyprowski return NULL; 494c7909509SMarek Szyprowski } 495c7909509SMarek Szyprowski 
496c7909509SMarek Szyprowski /* 497c7909509SMarek Szyprowski * Align the region allocation - allocations from pool are rather 498c7909509SMarek Szyprowski * small, so align them to their order in pages, minimum is a page 499c7909509SMarek Szyprowski * size. This helps reduce fragmentation of the DMA space. 500c7909509SMarek Szyprowski */ 501e4ea6918SAaro Koskinen align_mask = (1 << get_order(size)) - 1; 502e9da6e99SMarek Szyprowski 503e9da6e99SMarek Szyprowski spin_lock_irqsave(&pool->lock, flags); 504e9da6e99SMarek Szyprowski pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, 505e4ea6918SAaro Koskinen 0, count, align_mask); 506e9da6e99SMarek Szyprowski if (pageno < pool->nr_pages) { 507e9da6e99SMarek Szyprowski bitmap_set(pool->bitmap, pageno, count); 508e9da6e99SMarek Szyprowski ptr = pool->vaddr + PAGE_SIZE * pageno; 5096b3fe472SHiroshi Doyu *ret_page = pool->pages[pageno]; 510fb71285fSMarek Szyprowski } else { 511fb71285fSMarek Szyprowski pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n" 512fb71285fSMarek Szyprowski "Please increase it with coherent_pool= kernel parameter!\n", 513fb71285fSMarek Szyprowski (unsigned)pool->size / 1024); 514e9da6e99SMarek Szyprowski } 515e9da6e99SMarek Szyprowski spin_unlock_irqrestore(&pool->lock, flags); 516e9da6e99SMarek Szyprowski 517c7909509SMarek Szyprowski return ptr; 518c7909509SMarek Szyprowski } 519c7909509SMarek Szyprowski 52021d0a759SHiroshi Doyu static bool __in_atomic_pool(void *start, size_t size) 52121d0a759SHiroshi Doyu { 52221d0a759SHiroshi Doyu struct dma_pool *pool = &atomic_pool; 52321d0a759SHiroshi Doyu void *end = start + size; 52421d0a759SHiroshi Doyu void *pool_start = pool->vaddr; 52521d0a759SHiroshi Doyu void *pool_end = pool->vaddr + pool->size; 52621d0a759SHiroshi Doyu 527f3d87524SThomas Petazzoni if (start < pool_start || start >= pool_end) 52821d0a759SHiroshi Doyu return false; 52921d0a759SHiroshi Doyu 53021d0a759SHiroshi Doyu if (end <= pool_end) 53121d0a759SHiroshi 
Doyu return true; 53221d0a759SHiroshi Doyu 53321d0a759SHiroshi Doyu WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n", 53421d0a759SHiroshi Doyu start, end - 1, pool_start, pool_end - 1); 53521d0a759SHiroshi Doyu 53621d0a759SHiroshi Doyu return false; 53721d0a759SHiroshi Doyu } 53821d0a759SHiroshi Doyu 539e9da6e99SMarek Szyprowski static int __free_from_pool(void *start, size_t size) 540c7909509SMarek Szyprowski { 541e9da6e99SMarek Szyprowski struct dma_pool *pool = &atomic_pool; 542e9da6e99SMarek Szyprowski unsigned long pageno, count; 543e9da6e99SMarek Szyprowski unsigned long flags; 544c7909509SMarek Szyprowski 54521d0a759SHiroshi Doyu if (!__in_atomic_pool(start, size)) 546c7909509SMarek Szyprowski return 0; 547c7909509SMarek Szyprowski 548e9da6e99SMarek Szyprowski pageno = (start - pool->vaddr) >> PAGE_SHIFT; 549e9da6e99SMarek Szyprowski count = size >> PAGE_SHIFT; 550e9da6e99SMarek Szyprowski 551e9da6e99SMarek Szyprowski spin_lock_irqsave(&pool->lock, flags); 552e9da6e99SMarek Szyprowski bitmap_clear(pool->bitmap, pageno, count); 553e9da6e99SMarek Szyprowski spin_unlock_irqrestore(&pool->lock, flags); 554e9da6e99SMarek Szyprowski 555c7909509SMarek Szyprowski return 1; 556c7909509SMarek Szyprowski } 557c7909509SMarek Szyprowski 558c7909509SMarek Szyprowski static void *__alloc_from_contiguous(struct device *dev, size_t size, 5599848e48fSMarek Szyprowski pgprot_t prot, struct page **ret_page, 5609848e48fSMarek Szyprowski const void *caller) 561c7909509SMarek Szyprowski { 562c7909509SMarek Szyprowski unsigned long order = get_order(size); 563c7909509SMarek Szyprowski size_t count = size >> PAGE_SHIFT; 564c7909509SMarek Szyprowski struct page *page; 5659848e48fSMarek Szyprowski void *ptr; 566c7909509SMarek Szyprowski 567c7909509SMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order); 568c7909509SMarek Szyprowski if (!page) 569c7909509SMarek Szyprowski return NULL; 570c7909509SMarek Szyprowski 571c7909509SMarek Szyprowski 
__dma_clear_buffer(page, size); 572c7909509SMarek Szyprowski 5739848e48fSMarek Szyprowski if (PageHighMem(page)) { 5749848e48fSMarek Szyprowski ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller); 5759848e48fSMarek Szyprowski if (!ptr) { 5769848e48fSMarek Szyprowski dma_release_from_contiguous(dev, page, count); 5779848e48fSMarek Szyprowski return NULL; 5789848e48fSMarek Szyprowski } 5799848e48fSMarek Szyprowski } else { 5809848e48fSMarek Szyprowski __dma_remap(page, size, prot); 5819848e48fSMarek Szyprowski ptr = page_address(page); 5829848e48fSMarek Szyprowski } 583c7909509SMarek Szyprowski *ret_page = page; 5849848e48fSMarek Szyprowski return ptr; 585c7909509SMarek Szyprowski } 586c7909509SMarek Szyprowski 587c7909509SMarek Szyprowski static void __free_from_contiguous(struct device *dev, struct page *page, 5889848e48fSMarek Szyprowski void *cpu_addr, size_t size) 589c7909509SMarek Szyprowski { 5909848e48fSMarek Szyprowski if (PageHighMem(page)) 5919848e48fSMarek Szyprowski __dma_free_remap(cpu_addr, size); 5929848e48fSMarek Szyprowski else 593c7909509SMarek Szyprowski __dma_remap(page, size, pgprot_kernel); 594c7909509SMarek Szyprowski dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); 595c7909509SMarek Szyprowski } 596c7909509SMarek Szyprowski 597f99d6034SMarek Szyprowski static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) 598f99d6034SMarek Szyprowski { 599f99d6034SMarek Szyprowski prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ? 
600f99d6034SMarek Szyprowski pgprot_writecombine(prot) : 601f99d6034SMarek Szyprowski pgprot_dmacoherent(prot); 602f99d6034SMarek Szyprowski return prot; 603f99d6034SMarek Szyprowski } 604f99d6034SMarek Szyprowski 605c7909509SMarek Szyprowski #define nommu() 0 606c7909509SMarek Szyprowski 607ab6494f0SCatalin Marinas #else /* !CONFIG_MMU */ 608695ae0afSRussell King 609c7909509SMarek Szyprowski #define nommu() 1 610c7909509SMarek Szyprowski 611f99d6034SMarek Szyprowski #define __get_dma_pgprot(attrs, prot) __pgprot(0) 612c7909509SMarek Szyprowski #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL 613e9da6e99SMarek Szyprowski #define __alloc_from_pool(size, ret_page) NULL 6149848e48fSMarek Szyprowski #define __alloc_from_contiguous(dev, size, prot, ret, c) NULL 615c7909509SMarek Szyprowski #define __free_from_pool(cpu_addr, size) 0 6169848e48fSMarek Szyprowski #define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0) 617c7909509SMarek Szyprowski #define __dma_free_remap(cpu_addr, size) do { } while (0) 61831ebf944SRussell King 61931ebf944SRussell King #endif /* CONFIG_MMU */ 62031ebf944SRussell King 621c7909509SMarek Szyprowski static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, 622c7909509SMarek Szyprowski struct page **ret_page) 623ab6494f0SCatalin Marinas { 62404da5694SRussell King struct page *page; 625c7909509SMarek Szyprowski page = __dma_alloc_buffer(dev, size, gfp); 626c7909509SMarek Szyprowski if (!page) 627c7909509SMarek Szyprowski return NULL; 628c7909509SMarek Szyprowski 629c7909509SMarek Szyprowski *ret_page = page; 630c7909509SMarek Szyprowski return page_address(page); 631c7909509SMarek Szyprowski } 632c7909509SMarek Szyprowski 633c7909509SMarek Szyprowski 634c7909509SMarek Szyprowski 635c7909509SMarek Szyprowski static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 636dd37e940SRob Herring gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller) 637c7909509SMarek 
Szyprowski { 638c7909509SMarek Szyprowski u64 mask = get_coherent_dma_mask(dev); 6393dd7ea92SJingoo Han struct page *page = NULL; 64031ebf944SRussell King void *addr; 641ab6494f0SCatalin Marinas 642c7909509SMarek Szyprowski #ifdef CONFIG_DMA_API_DEBUG 643c7909509SMarek Szyprowski u64 limit = (mask + 1) & ~mask; 644c7909509SMarek Szyprowski if (limit && size >= limit) { 645c7909509SMarek Szyprowski dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", 646c7909509SMarek Szyprowski size, mask); 647c7909509SMarek Szyprowski return NULL; 648c7909509SMarek Szyprowski } 649c7909509SMarek Szyprowski #endif 650c7909509SMarek Szyprowski 651c7909509SMarek Szyprowski if (!mask) 652c7909509SMarek Szyprowski return NULL; 653c7909509SMarek Szyprowski 654c7909509SMarek Szyprowski if (mask < 0xffffffffULL) 655c7909509SMarek Szyprowski gfp |= GFP_DMA; 656c7909509SMarek Szyprowski 657ea2e7057SSumit Bhattacharya /* 658ea2e7057SSumit Bhattacharya * Following is a work-around (a.k.a. hack) to prevent pages 659ea2e7057SSumit Bhattacharya * with __GFP_COMP being passed to split_page() which cannot 660ea2e7057SSumit Bhattacharya * handle them. The real problem is that this flag probably 661ea2e7057SSumit Bhattacharya * should be 0 on ARM as it is not supported on this 662ea2e7057SSumit Bhattacharya * platform; see CONFIG_HUGETLBFS. 
663ea2e7057SSumit Bhattacharya */ 664ea2e7057SSumit Bhattacharya gfp &= ~(__GFP_COMP); 665ea2e7057SSumit Bhattacharya 666553ac788SMarek Szyprowski *handle = DMA_ERROR_CODE; 66704da5694SRussell King size = PAGE_ALIGN(size); 66804da5694SRussell King 669dd37e940SRob Herring if (is_coherent || nommu()) 670c7909509SMarek Szyprowski addr = __alloc_simple_buffer(dev, size, gfp, &page); 671633dc92aSRussell King else if (!(gfp & __GFP_WAIT)) 672e9da6e99SMarek Szyprowski addr = __alloc_from_pool(size, &page); 673f1ae98daSMarek Szyprowski else if (!IS_ENABLED(CONFIG_CMA)) 674c7909509SMarek Szyprowski addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); 67531ebf944SRussell King else 6769848e48fSMarek Szyprowski addr = __alloc_from_contiguous(dev, size, prot, &page, caller); 67731ebf944SRussell King 67831ebf944SRussell King if (addr) 6799eedd963SRussell King *handle = pfn_to_dma(dev, page_to_pfn(page)); 68031ebf944SRussell King 68131ebf944SRussell King return addr; 682ab6494f0SCatalin Marinas } 683695ae0afSRussell King 6840ddbccd1SRussell King /* 6850ddbccd1SRussell King * Allocate DMA-coherent memory space and return both the kernel remapped 6860ddbccd1SRussell King * virtual and bus address for that space. 
6870ddbccd1SRussell King */ 688f99d6034SMarek Szyprowski void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 689f99d6034SMarek Szyprowski gfp_t gfp, struct dma_attrs *attrs) 6900ddbccd1SRussell King { 691f99d6034SMarek Szyprowski pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); 6920ddbccd1SRussell King void *memory; 6930ddbccd1SRussell King 6940ddbccd1SRussell King if (dma_alloc_from_coherent(dev, size, handle, &memory)) 6950ddbccd1SRussell King return memory; 6960ddbccd1SRussell King 697dd37e940SRob Herring return __dma_alloc(dev, size, handle, gfp, prot, false, 698dd37e940SRob Herring __builtin_return_address(0)); 699dd37e940SRob Herring } 700dd37e940SRob Herring 701dd37e940SRob Herring static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 702dd37e940SRob Herring dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 703dd37e940SRob Herring { 704dd37e940SRob Herring pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); 705dd37e940SRob Herring void *memory; 706dd37e940SRob Herring 707dd37e940SRob Herring if (dma_alloc_from_coherent(dev, size, handle, &memory)) 708dd37e940SRob Herring return memory; 709dd37e940SRob Herring 710dd37e940SRob Herring return __dma_alloc(dev, size, handle, gfp, prot, true, 71145cd5290SRussell King __builtin_return_address(0)); 7120ddbccd1SRussell King } 7130ddbccd1SRussell King 7140ddbccd1SRussell King /* 715f99d6034SMarek Szyprowski * Create userspace mapping for the DMA-coherent memory. 
7160ddbccd1SRussell King */ 717f99d6034SMarek Szyprowski int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 718f99d6034SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 719f99d6034SMarek Szyprowski struct dma_attrs *attrs) 7200ddbccd1SRussell King { 721ab6494f0SCatalin Marinas int ret = -ENXIO; 722ab6494f0SCatalin Marinas #ifdef CONFIG_MMU 72350262a4bSMarek Szyprowski unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 72450262a4bSMarek Szyprowski unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 725c7909509SMarek Szyprowski unsigned long pfn = dma_to_pfn(dev, dma_addr); 72650262a4bSMarek Szyprowski unsigned long off = vma->vm_pgoff; 72750262a4bSMarek Szyprowski 728f99d6034SMarek Szyprowski vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 729f99d6034SMarek Szyprowski 73047142f07SMarek Szyprowski if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) 73147142f07SMarek Szyprowski return ret; 73247142f07SMarek Szyprowski 73350262a4bSMarek Szyprowski if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { 7340ddbccd1SRussell King ret = remap_pfn_range(vma, vma->vm_start, 73550262a4bSMarek Szyprowski pfn + off, 736c7909509SMarek Szyprowski vma->vm_end - vma->vm_start, 7370ddbccd1SRussell King vma->vm_page_prot); 73850262a4bSMarek Szyprowski } 739ab6494f0SCatalin Marinas #endif /* CONFIG_MMU */ 7400ddbccd1SRussell King 7410ddbccd1SRussell King return ret; 7420ddbccd1SRussell King } 7430ddbccd1SRussell King 7440ddbccd1SRussell King /* 745c7909509SMarek Szyprowski * Free a buffer as defined by the above mapping. 
7460ddbccd1SRussell King */ 747dd37e940SRob Herring static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 748dd37e940SRob Herring dma_addr_t handle, struct dma_attrs *attrs, 749dd37e940SRob Herring bool is_coherent) 7500ddbccd1SRussell King { 751c7909509SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); 7520ddbccd1SRussell King 7530ddbccd1SRussell King if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) 7540ddbccd1SRussell King return; 7550ddbccd1SRussell King 7563e82d012SRussell King size = PAGE_ALIGN(size); 7573e82d012SRussell King 758dd37e940SRob Herring if (is_coherent || nommu()) { 759c7909509SMarek Szyprowski __dma_free_buffer(page, size); 760d9e0d149SAaro Koskinen } else if (__free_from_pool(cpu_addr, size)) { 761d9e0d149SAaro Koskinen return; 762f1ae98daSMarek Szyprowski } else if (!IS_ENABLED(CONFIG_CMA)) { 763695ae0afSRussell King __dma_free_remap(cpu_addr, size); 764c7909509SMarek Szyprowski __dma_free_buffer(page, size); 765c7909509SMarek Szyprowski } else { 766c7909509SMarek Szyprowski /* 767c7909509SMarek Szyprowski * Non-atomic allocations cannot be freed with IRQs disabled 768c7909509SMarek Szyprowski */ 769c7909509SMarek Szyprowski WARN_ON(irqs_disabled()); 7709848e48fSMarek Szyprowski __free_from_contiguous(dev, page, cpu_addr, size); 771c7909509SMarek Szyprowski } 7720ddbccd1SRussell King } 773afd1a321SRussell King 774dd37e940SRob Herring void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 775dd37e940SRob Herring dma_addr_t handle, struct dma_attrs *attrs) 776dd37e940SRob Herring { 777dd37e940SRob Herring __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); 778dd37e940SRob Herring } 779dd37e940SRob Herring 780dd37e940SRob Herring static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, 781dd37e940SRob Herring dma_addr_t handle, struct dma_attrs *attrs) 782dd37e940SRob Herring { 783dd37e940SRob Herring __arm_dma_free(dev, size, cpu_addr, 
handle, attrs, true); 784dd37e940SRob Herring } 785dd37e940SRob Herring 786dc2832e1SMarek Szyprowski int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, 787dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t handle, size_t size, 788dc2832e1SMarek Szyprowski struct dma_attrs *attrs) 789dc2832e1SMarek Szyprowski { 790dc2832e1SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); 791dc2832e1SMarek Szyprowski int ret; 792dc2832e1SMarek Szyprowski 793dc2832e1SMarek Szyprowski ret = sg_alloc_table(sgt, 1, GFP_KERNEL); 794dc2832e1SMarek Szyprowski if (unlikely(ret)) 795dc2832e1SMarek Szyprowski return ret; 796dc2832e1SMarek Szyprowski 797dc2832e1SMarek Szyprowski sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); 798dc2832e1SMarek Szyprowski return 0; 799dc2832e1SMarek Szyprowski } 800dc2832e1SMarek Szyprowski 80165af191aSRussell King static void dma_cache_maint_page(struct page *page, unsigned long offset, 802a9c9147eSRussell King size_t size, enum dma_data_direction dir, 803a9c9147eSRussell King void (*op)(const void *, size_t, int)) 80465af191aSRussell King { 80515653371SRussell King unsigned long pfn; 80615653371SRussell King size_t left = size; 80715653371SRussell King 80815653371SRussell King pfn = page_to_pfn(page) + offset / PAGE_SIZE; 80915653371SRussell King offset %= PAGE_SIZE; 81015653371SRussell King 81165af191aSRussell King /* 81265af191aSRussell King * A single sg entry may refer to multiple physically contiguous 81365af191aSRussell King * pages. But we still need to process highmem pages individually. 81465af191aSRussell King * If highmem is not configured then the bulk of this loop gets 81565af191aSRussell King * optimized out. 
81665af191aSRussell King */ 81765af191aSRussell King do { 81865af191aSRussell King size_t len = left; 81993f1d629SRussell King void *vaddr; 82093f1d629SRussell King 82115653371SRussell King page = pfn_to_page(pfn); 82215653371SRussell King 82393f1d629SRussell King if (PageHighMem(page)) { 82415653371SRussell King if (len + offset > PAGE_SIZE) 82565af191aSRussell King len = PAGE_SIZE - offset; 826dd0f67f4SJoonsoo Kim 827dd0f67f4SJoonsoo Kim if (cache_is_vipt_nonaliasing()) { 82839af22a7SNicolas Pitre vaddr = kmap_atomic(page); 8297e5a69e8SNicolas Pitre op(vaddr + offset, len, dir); 83039af22a7SNicolas Pitre kunmap_atomic(vaddr); 831dd0f67f4SJoonsoo Kim } else { 832dd0f67f4SJoonsoo Kim vaddr = kmap_high_get(page); 833dd0f67f4SJoonsoo Kim if (vaddr) { 834dd0f67f4SJoonsoo Kim op(vaddr + offset, len, dir); 835dd0f67f4SJoonsoo Kim kunmap_high(page); 836dd0f67f4SJoonsoo Kim } 83793f1d629SRussell King } 83893f1d629SRussell King } else { 83993f1d629SRussell King vaddr = page_address(page) + offset; 840a9c9147eSRussell King op(vaddr, len, dir); 84193f1d629SRussell King } 84265af191aSRussell King offset = 0; 84315653371SRussell King pfn++; 84465af191aSRussell King left -= len; 84565af191aSRussell King } while (left); 84665af191aSRussell King } 84765af191aSRussell King 84851fde349SMarek Szyprowski /* 84951fde349SMarek Szyprowski * Make an area consistent for devices. 85051fde349SMarek Szyprowski * Note: Drivers should NOT use this function directly, as it will break 85151fde349SMarek Szyprowski * platforms with CONFIG_DMABOUNCE. 
85251fde349SMarek Szyprowski * Use the driver DMA support - see dma-mapping.h (dma_sync_*) 85351fde349SMarek Szyprowski */ 85451fde349SMarek Szyprowski static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, 85565af191aSRussell King size_t size, enum dma_data_direction dir) 85665af191aSRussell King { 85743377453SNicolas Pitre unsigned long paddr; 85843377453SNicolas Pitre 859a9c9147eSRussell King dma_cache_maint_page(page, off, size, dir, dmac_map_area); 86043377453SNicolas Pitre 86165af191aSRussell King paddr = page_to_phys(page) + off; 8622ffe2da3SRussell King if (dir == DMA_FROM_DEVICE) { 8632ffe2da3SRussell King outer_inv_range(paddr, paddr + size); 8642ffe2da3SRussell King } else { 8652ffe2da3SRussell King outer_clean_range(paddr, paddr + size); 8662ffe2da3SRussell King } 8672ffe2da3SRussell King /* FIXME: non-speculating: flush on bidirectional mappings? */ 86843377453SNicolas Pitre } 8694ea0d737SRussell King 87051fde349SMarek Szyprowski static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, 8714ea0d737SRussell King size_t size, enum dma_data_direction dir) 8724ea0d737SRussell King { 8732ffe2da3SRussell King unsigned long paddr = page_to_phys(page) + off; 8742ffe2da3SRussell King 8752ffe2da3SRussell King /* FIXME: non-speculating: not required */ 8762ffe2da3SRussell King /* don't bother invalidating if DMA to device */ 8772ffe2da3SRussell King if (dir != DMA_TO_DEVICE) 8782ffe2da3SRussell King outer_inv_range(paddr, paddr + size); 8792ffe2da3SRussell King 880a9c9147eSRussell King dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); 881c0177800SCatalin Marinas 882c0177800SCatalin Marinas /* 883c0177800SCatalin Marinas * Mark the D-cache clean for this page to avoid extra flushing. 
884c0177800SCatalin Marinas */ 885c0177800SCatalin Marinas if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) 886c0177800SCatalin Marinas set_bit(PG_dcache_clean, &page->flags); 8874ea0d737SRussell King } 88843377453SNicolas Pitre 889afd1a321SRussell King /** 8902a550e73SMarek Szyprowski * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA 891afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 892afd1a321SRussell King * @sg: list of buffers 893afd1a321SRussell King * @nents: number of buffers to map 894afd1a321SRussell King * @dir: DMA transfer direction 895afd1a321SRussell King * 896afd1a321SRussell King * Map a set of buffers described by scatterlist in streaming mode for DMA. 897afd1a321SRussell King * This is the scatter-gather version of the dma_map_single interface. 898afd1a321SRussell King * Here the scatter gather list elements are each tagged with the 899afd1a321SRussell King * appropriate dma address and length. They are obtained via 900afd1a321SRussell King * sg_dma_{address,length}. 901afd1a321SRussell King * 902afd1a321SRussell King * Device ownership issues as mentioned for dma_map_single are the same 903afd1a321SRussell King * here. 
904afd1a321SRussell King */ 9052dc6a016SMarek Szyprowski int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 9062dc6a016SMarek Szyprowski enum dma_data_direction dir, struct dma_attrs *attrs) 907afd1a321SRussell King { 9082a550e73SMarek Szyprowski struct dma_map_ops *ops = get_dma_ops(dev); 909afd1a321SRussell King struct scatterlist *s; 91001135d92SRussell King int i, j; 911afd1a321SRussell King 912afd1a321SRussell King for_each_sg(sg, s, nents, i) { 9134ce63fcdSMarek Szyprowski #ifdef CONFIG_NEED_SG_DMA_LENGTH 9144ce63fcdSMarek Szyprowski s->dma_length = s->length; 9154ce63fcdSMarek Szyprowski #endif 9162a550e73SMarek Szyprowski s->dma_address = ops->map_page(dev, sg_page(s), s->offset, 9172a550e73SMarek Szyprowski s->length, dir, attrs); 91801135d92SRussell King if (dma_mapping_error(dev, s->dma_address)) 91901135d92SRussell King goto bad_mapping; 920afd1a321SRussell King } 921afd1a321SRussell King return nents; 92201135d92SRussell King 92301135d92SRussell King bad_mapping: 92401135d92SRussell King for_each_sg(sg, s, i, j) 9252a550e73SMarek Szyprowski ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); 92601135d92SRussell King return 0; 927afd1a321SRussell King } 928afd1a321SRussell King 929afd1a321SRussell King /** 9302a550e73SMarek Szyprowski * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 931afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 932afd1a321SRussell King * @sg: list of buffers 9330adfca6fSLinus Walleij * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 934afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 935afd1a321SRussell King * 936afd1a321SRussell King * Unmap a set of streaming mode DMA translations. Again, CPU access 937afd1a321SRussell King * rules concerning calls here are the same as for dma_unmap_single(). 
938afd1a321SRussell King */ 9392dc6a016SMarek Szyprowski void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 9402dc6a016SMarek Szyprowski enum dma_data_direction dir, struct dma_attrs *attrs) 941afd1a321SRussell King { 9422a550e73SMarek Szyprowski struct dma_map_ops *ops = get_dma_ops(dev); 94301135d92SRussell King struct scatterlist *s; 94401135d92SRussell King 94501135d92SRussell King int i; 94624056f52SRussell King 94701135d92SRussell King for_each_sg(sg, s, nents, i) 9482a550e73SMarek Szyprowski ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); 949afd1a321SRussell King } 950afd1a321SRussell King 951afd1a321SRussell King /** 9522a550e73SMarek Szyprowski * arm_dma_sync_sg_for_cpu 953afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 954afd1a321SRussell King * @sg: list of buffers 955afd1a321SRussell King * @nents: number of buffers to map (returned from dma_map_sg) 956afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 957afd1a321SRussell King */ 9582dc6a016SMarek Szyprowski void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 959afd1a321SRussell King int nents, enum dma_data_direction dir) 960afd1a321SRussell King { 9612a550e73SMarek Szyprowski struct dma_map_ops *ops = get_dma_ops(dev); 962afd1a321SRussell King struct scatterlist *s; 963afd1a321SRussell King int i; 964afd1a321SRussell King 9652a550e73SMarek Szyprowski for_each_sg(sg, s, nents, i) 9662a550e73SMarek Szyprowski ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, 9672a550e73SMarek Szyprowski dir); 968afd1a321SRussell King } 96924056f52SRussell King 970afd1a321SRussell King /** 9712a550e73SMarek Szyprowski * arm_dma_sync_sg_for_device 972afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 973afd1a321SRussell King * @sg: list of buffers 974afd1a321SRussell King * @nents: number of buffers to map (returned 
from dma_map_sg) 975afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 976afd1a321SRussell King */ 9772dc6a016SMarek Szyprowski void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 978afd1a321SRussell King int nents, enum dma_data_direction dir) 979afd1a321SRussell King { 9802a550e73SMarek Szyprowski struct dma_map_ops *ops = get_dma_ops(dev); 981afd1a321SRussell King struct scatterlist *s; 982afd1a321SRussell King int i; 983afd1a321SRussell King 9842a550e73SMarek Szyprowski for_each_sg(sg, s, nents, i) 9852a550e73SMarek Szyprowski ops->sync_single_for_device(dev, sg_dma_address(s), s->length, 9862a550e73SMarek Szyprowski dir); 987afd1a321SRussell King } 98824056f52SRussell King 989022ae537SRussell King /* 990022ae537SRussell King * Return whether the given device DMA address mask can be supported 991022ae537SRussell King * properly. For example, if your device can only drive the low 24-bits 992022ae537SRussell King * during bus mastering, then you would pass 0x00ffffff as the mask 993022ae537SRussell King * to this function. 
994022ae537SRussell King */ 995022ae537SRussell King int dma_supported(struct device *dev, u64 mask) 996022ae537SRussell King { 997022ae537SRussell King if (mask < (u64)arm_dma_limit) 998022ae537SRussell King return 0; 999022ae537SRussell King return 1; 1000022ae537SRussell King } 1001022ae537SRussell King EXPORT_SYMBOL(dma_supported); 1002022ae537SRussell King 100387b54e78SGregory CLEMENT int arm_dma_set_mask(struct device *dev, u64 dma_mask) 1004022ae537SRussell King { 1005022ae537SRussell King if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 1006022ae537SRussell King return -EIO; 1007022ae537SRussell King 1008022ae537SRussell King *dev->dma_mask = dma_mask; 1009022ae537SRussell King 1010022ae537SRussell King return 0; 1011022ae537SRussell King } 1012022ae537SRussell King 101324056f52SRussell King #define PREALLOC_DMA_DEBUG_ENTRIES 4096 101424056f52SRussell King 101524056f52SRussell King static int __init dma_debug_do_init(void) 101624056f52SRussell King { 101724056f52SRussell King dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 101824056f52SRussell King return 0; 101924056f52SRussell King } 102024056f52SRussell King fs_initcall(dma_debug_do_init); 10214ce63fcdSMarek Szyprowski 10224ce63fcdSMarek Szyprowski #ifdef CONFIG_ARM_DMA_USE_IOMMU 10234ce63fcdSMarek Szyprowski 10244ce63fcdSMarek Szyprowski /* IOMMU */ 10254ce63fcdSMarek Szyprowski 10264ce63fcdSMarek Szyprowski static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, 10274ce63fcdSMarek Szyprowski size_t size) 10284ce63fcdSMarek Szyprowski { 10294ce63fcdSMarek Szyprowski unsigned int order = get_order(size); 10304ce63fcdSMarek Szyprowski unsigned int align = 0; 10314ce63fcdSMarek Szyprowski unsigned int count, start; 10324ce63fcdSMarek Szyprowski unsigned long flags; 10334ce63fcdSMarek Szyprowski 103460460abfSSeung-Woo Kim if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT) 103560460abfSSeung-Woo Kim order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT; 103660460abfSSeung-Woo Kim 10374ce63fcdSMarek Szyprowski 
count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) + 10384ce63fcdSMarek Szyprowski (1 << mapping->order) - 1) >> mapping->order; 10394ce63fcdSMarek Szyprowski 10404ce63fcdSMarek Szyprowski if (order > mapping->order) 10414ce63fcdSMarek Szyprowski align = (1 << (order - mapping->order)) - 1; 10424ce63fcdSMarek Szyprowski 10434ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 10444ce63fcdSMarek Szyprowski start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0, 10454ce63fcdSMarek Szyprowski count, align); 10464ce63fcdSMarek Szyprowski if (start > mapping->bits) { 10474ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 10484ce63fcdSMarek Szyprowski return DMA_ERROR_CODE; 10494ce63fcdSMarek Szyprowski } 10504ce63fcdSMarek Szyprowski 10514ce63fcdSMarek Szyprowski bitmap_set(mapping->bitmap, start, count); 10524ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 10534ce63fcdSMarek Szyprowski 10544ce63fcdSMarek Szyprowski return mapping->base + (start << (mapping->order + PAGE_SHIFT)); 10554ce63fcdSMarek Szyprowski } 10564ce63fcdSMarek Szyprowski 10574ce63fcdSMarek Szyprowski static inline void __free_iova(struct dma_iommu_mapping *mapping, 10584ce63fcdSMarek Szyprowski dma_addr_t addr, size_t size) 10594ce63fcdSMarek Szyprowski { 10604ce63fcdSMarek Szyprowski unsigned int start = (addr - mapping->base) >> 10614ce63fcdSMarek Szyprowski (mapping->order + PAGE_SHIFT); 10624ce63fcdSMarek Szyprowski unsigned int count = ((size >> PAGE_SHIFT) + 10634ce63fcdSMarek Szyprowski (1 << mapping->order) - 1) >> mapping->order; 10644ce63fcdSMarek Szyprowski unsigned long flags; 10654ce63fcdSMarek Szyprowski 10664ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 10674ce63fcdSMarek Szyprowski bitmap_clear(mapping->bitmap, start, count); 10684ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 10694ce63fcdSMarek Szyprowski } 10704ce63fcdSMarek Szyprowski 1071549a17e4SMarek Szyprowski static 
struct page **__iommu_alloc_buffer(struct device *dev, size_t size, 1072549a17e4SMarek Szyprowski gfp_t gfp, struct dma_attrs *attrs) 10734ce63fcdSMarek Szyprowski { 10744ce63fcdSMarek Szyprowski struct page **pages; 10754ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 10764ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *); 10774ce63fcdSMarek Szyprowski int i = 0; 10784ce63fcdSMarek Szyprowski 10794ce63fcdSMarek Szyprowski if (array_size <= PAGE_SIZE) 10804ce63fcdSMarek Szyprowski pages = kzalloc(array_size, gfp); 10814ce63fcdSMarek Szyprowski else 10824ce63fcdSMarek Szyprowski pages = vzalloc(array_size); 10834ce63fcdSMarek Szyprowski if (!pages) 10844ce63fcdSMarek Szyprowski return NULL; 10854ce63fcdSMarek Szyprowski 1086549a17e4SMarek Szyprowski if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) 1087549a17e4SMarek Szyprowski { 1088549a17e4SMarek Szyprowski unsigned long order = get_order(size); 1089549a17e4SMarek Szyprowski struct page *page; 1090549a17e4SMarek Szyprowski 1091549a17e4SMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order); 1092549a17e4SMarek Szyprowski if (!page) 1093549a17e4SMarek Szyprowski goto error; 1094549a17e4SMarek Szyprowski 1095549a17e4SMarek Szyprowski __dma_clear_buffer(page, size); 1096549a17e4SMarek Szyprowski 1097549a17e4SMarek Szyprowski for (i = 0; i < count; i++) 1098549a17e4SMarek Szyprowski pages[i] = page + i; 1099549a17e4SMarek Szyprowski 1100549a17e4SMarek Szyprowski return pages; 1101549a17e4SMarek Szyprowski } 1102549a17e4SMarek Szyprowski 1103f8669befSMarek Szyprowski /* 1104f8669befSMarek Szyprowski * IOMMU can map any pages, so himem can also be used here 1105f8669befSMarek Szyprowski */ 1106f8669befSMarek Szyprowski gfp |= __GFP_NOWARN | __GFP_HIGHMEM; 1107f8669befSMarek Szyprowski 11084ce63fcdSMarek Szyprowski while (count) { 1109593f4735SMarek Szyprowski int j, order = __fls(count); 11104ce63fcdSMarek Szyprowski 1111f8669befSMarek Szyprowski pages[i] = alloc_pages(gfp, 
order); 11124ce63fcdSMarek Szyprowski while (!pages[i] && order) 1113f8669befSMarek Szyprowski pages[i] = alloc_pages(gfp, --order); 11144ce63fcdSMarek Szyprowski if (!pages[i]) 11154ce63fcdSMarek Szyprowski goto error; 11164ce63fcdSMarek Szyprowski 11175a796eebSHiroshi Doyu if (order) { 11184ce63fcdSMarek Szyprowski split_page(pages[i], order); 11194ce63fcdSMarek Szyprowski j = 1 << order; 11204ce63fcdSMarek Szyprowski while (--j) 11214ce63fcdSMarek Szyprowski pages[i + j] = pages[i] + j; 11225a796eebSHiroshi Doyu } 11234ce63fcdSMarek Szyprowski 11244ce63fcdSMarek Szyprowski __dma_clear_buffer(pages[i], PAGE_SIZE << order); 11254ce63fcdSMarek Szyprowski i += 1 << order; 11264ce63fcdSMarek Szyprowski count -= 1 << order; 11274ce63fcdSMarek Szyprowski } 11284ce63fcdSMarek Szyprowski 11294ce63fcdSMarek Szyprowski return pages; 11304ce63fcdSMarek Szyprowski error: 11319fa8af91SMarek Szyprowski while (i--) 11324ce63fcdSMarek Szyprowski if (pages[i]) 11334ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 113446c87852SPrathyush K if (array_size <= PAGE_SIZE) 11354ce63fcdSMarek Szyprowski kfree(pages); 11364ce63fcdSMarek Szyprowski else 11374ce63fcdSMarek Szyprowski vfree(pages); 11384ce63fcdSMarek Szyprowski return NULL; 11394ce63fcdSMarek Szyprowski } 11404ce63fcdSMarek Szyprowski 1141549a17e4SMarek Szyprowski static int __iommu_free_buffer(struct device *dev, struct page **pages, 1142549a17e4SMarek Szyprowski size_t size, struct dma_attrs *attrs) 11434ce63fcdSMarek Szyprowski { 11444ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 11454ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *); 11464ce63fcdSMarek Szyprowski int i; 1147549a17e4SMarek Szyprowski 1148549a17e4SMarek Szyprowski if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { 1149549a17e4SMarek Szyprowski dma_release_from_contiguous(dev, pages[0], count); 1150549a17e4SMarek Szyprowski } else { 11514ce63fcdSMarek Szyprowski for (i = 0; i < count; i++) 11524ce63fcdSMarek Szyprowski 
if (pages[i])
			__free_pages(pages[i], 0);
	}

	/* free the page-pointer array itself: kmalloc'd when it fit in one
	 * page, vmalloc'd otherwise (mirrors the allocation-side choice) */
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return 0;
}

/*
 * Create a CPU mapping for a specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;

	/* reserve a contiguous kernel virtual range for the scattered pages */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;

	/* record the backing pages so __iommu_get_pages() can recover them
	 * later via find_vm_area() */
	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;

	/* map each page individually; the pages need not be physically
	 * contiguous */
	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	/* tear down whatever was mapped so far before dropping the area */
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		/* batch physically-contiguous runs of pages into a single
		 * iommu_map() call */
		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		/* NOTE(review): prot is passed as 0 here, unlike
		 * arm_coherent_iommu_map_page() which derives IOMMU_READ/
		 * IOMMU_WRITE from the direction — confirm intended */
		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	/* only (iova - dma_addr) bytes were mapped; unmap exactly that much,
	 * but release the full iova reservation */
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}

/* Remove the IO-space mapping created by __iommu_create_mapping() and
 * return the iova range to the allocator. */
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

/* Look up the page array slot backing an address inside the atomic pool. */
static struct page **__atomic_get_pages(void *addr)
{
	struct dma_pool *pool = &atomic_pool;
	struct page **pages = pool->pages;
	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;

	return pages + offs;
}

/* Recover the struct page array for a coherent allocation, whichever of the
 * three allocation paths (atomic pool, no-kernel-mapping, vmalloc remap)
 * produced it.  Returns NULL if cpu_addr is not a known coherent area. */
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	/* with DMA_ATTR_NO_KERNEL_MAPPING the "cpu address" handed to the
	 * caller is really the pages array itself */
	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}

/* Atomic-context allocation: carve from the preallocated pool and create an
 * IOMMU mapping for the single backing page. */
static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

/* Counterpart of __iommu_alloc_atomic(). */
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
			       dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}

/* dma_map_ops .alloc for IOMMU-backed devices: allocate pages, map them in
 * device IO space and (unless DMA_ATTR_NO_KERNEL_MAPPING) remap them into a
 * contiguous kernel virtual area. */
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	/* cannot sleep: fall back to the preallocated atomic pool */
	if (gfp & GFP_ATOMIC)
		return __iommu_alloc_atomic(dev, size, handle);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	/* no kernel mapping requested: hand the pages array back as the
	 * "cpu address" (see __iommu_get_pages) */
	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

/* dma_map_ops .mmap: insert the allocation's pages one by one into a user
 * vma.  Fails with -ENXIO if cpu_addr is not a known coherent area. */
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	/* atomic-pool allocations take a dedicated free path */
	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	/* undo the kernel virtual remap unless the allocation never had one */
	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		unmap_kernel_range((unsigned long)cpu_addr, size);
		vunmap(cpu_addr);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

/* dma_map_ops .get_sgtable: build an sg_table describing the allocation's
 * backing pages so another driver/subsystem can map them. */
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	/* walk sg entries until the whole iova reservation is populated */
	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		/* clean CPU caches for the buffer before handing it to the
		 * device, unless the caller opted out or dev is coherent */
		if (!is_coherent &&
			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		/* NOTE(review): prot 0 here too, unlike the map_page paths —
		 * confirm intended */
		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	/* count tracks only the pages actually mapped so far */
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

/* Shared worker for the coherent and non-coherent map_sg entry points:
 * greedily merges consecutive sg entries into iova-contiguous chunks,
 * bounded by the device's max segment size.  Returns the number of DMA
 * segments produced, or 0 on failure. */
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		/* a non-zero offset, a non-page-multiple running size, or
		 * exceeding the device segment limit ends the current chunk */
		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	/* flush the final chunk */
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	/* unwind the 'count' chunks already committed */
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	/* coherent variant: no CPU cache maintenance in __map_sg_chunk */
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	/* non-coherent variant: performs cache maintenance per chunk */
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

/* Shared worker for the coherent and non-coherent unmap_sg entry points:
 * removes each segment's IOMMU mapping and, for non-coherent devices,
 * invalidates CPU caches so the CPU sees the device's writes. */
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
				sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	/* NOTE(review): syncs s->offset/s->length (the CPU-side view of each
	 * entry), not the merged DMA segment lengths — confirm intended */
	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);

}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}


/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	/* translate the DMA direction into IOMMU access permissions */
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		prot = IOMMU_READ | IOMMU_WRITE;
		break;
	case DMA_TO_DEVICE:
		prot = IOMMU_READ;
		break;
	case DMA_FROM_DEVICE:
		prot = IOMMU_WRITE;
		break;
	default:
		prot = 0;
	}

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	/* the handle carries the in-page offset back to the caller */
	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	/* clean CPU caches first, then defer to the coherent mapping path */
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	/* recover the CPU page from the iova for cache maintenance */
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	/* invalidate CPU caches before the mapping disappears */
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/* dma_map_ops .sync_single_for_cpu: hand buffer ownership back to the CPU. */
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

/* dma_map_ops .sync_single_for_device: hand buffer ownership to the device. */
static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

/* dma_map_ops table for non-coherent devices behind an IOMMU. */
struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.set_dma_mask		= arm_dma_set_mask,
};

/* dma_map_ops table for i/o coherent devices behind an IOMMU
 * (initializer continues beyond this chunk). */
struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

.map_page = arm_coherent_iommu_map_page, 17980fa478dfSRob Herring .unmap_page = arm_coherent_iommu_unmap_page, 17990fa478dfSRob Herring 18000fa478dfSRob Herring .map_sg = arm_coherent_iommu_map_sg, 18010fa478dfSRob Herring .unmap_sg = arm_coherent_iommu_unmap_sg, 1802d09e1333SHiroshi Doyu 1803d09e1333SHiroshi Doyu .set_dma_mask = arm_dma_set_mask, 18040fa478dfSRob Herring }; 18050fa478dfSRob Herring 18064ce63fcdSMarek Szyprowski /** 18074ce63fcdSMarek Szyprowski * arm_iommu_create_mapping 18084ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls) 18094ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space 18104ce63fcdSMarek Szyprowski * @size: size of the valid IO address space 18114ce63fcdSMarek Szyprowski * @order: accuracy of the IO addresses allocations 18124ce63fcdSMarek Szyprowski * 18134ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused 18144ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and 18154ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions. 18164ce63fcdSMarek Szyprowski * 18174ce63fcdSMarek Szyprowski * The client device need to be attached to the mapping with 18184ce63fcdSMarek Szyprowski * arm_iommu_attach_device function. 
18194ce63fcdSMarek Szyprowski */ 18204ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 18214ce63fcdSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, 18224ce63fcdSMarek Szyprowski int order) 18234ce63fcdSMarek Szyprowski { 18244ce63fcdSMarek Szyprowski unsigned int count = size >> (PAGE_SHIFT + order); 18254ce63fcdSMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); 18264ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 18274ce63fcdSMarek Szyprowski int err = -ENOMEM; 18284ce63fcdSMarek Szyprowski 18294ce63fcdSMarek Szyprowski if (!count) 18304ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 18314ce63fcdSMarek Szyprowski 18324ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 18334ce63fcdSMarek Szyprowski if (!mapping) 18344ce63fcdSMarek Szyprowski goto err; 18354ce63fcdSMarek Szyprowski 18364ce63fcdSMarek Szyprowski mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 18374ce63fcdSMarek Szyprowski if (!mapping->bitmap) 18384ce63fcdSMarek Szyprowski goto err2; 18394ce63fcdSMarek Szyprowski 18404ce63fcdSMarek Szyprowski mapping->base = base; 18414ce63fcdSMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 18424ce63fcdSMarek Szyprowski mapping->order = order; 18434ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 18444ce63fcdSMarek Szyprowski 18454ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 18464ce63fcdSMarek Szyprowski if (!mapping->domain) 18474ce63fcdSMarek Szyprowski goto err3; 18484ce63fcdSMarek Szyprowski 18494ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 18504ce63fcdSMarek Szyprowski return mapping; 18514ce63fcdSMarek Szyprowski err3: 18524ce63fcdSMarek Szyprowski kfree(mapping->bitmap); 18534ce63fcdSMarek Szyprowski err2: 18544ce63fcdSMarek Szyprowski kfree(mapping); 18554ce63fcdSMarek Szyprowski err: 18564ce63fcdSMarek Szyprowski return ERR_PTR(err); 18574ce63fcdSMarek Szyprowski } 
185818177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); 18594ce63fcdSMarek Szyprowski 18604ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 18614ce63fcdSMarek Szyprowski { 18624ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 18634ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 18644ce63fcdSMarek Szyprowski 18654ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 18664ce63fcdSMarek Szyprowski kfree(mapping->bitmap); 18674ce63fcdSMarek Szyprowski kfree(mapping); 18684ce63fcdSMarek Szyprowski } 18694ce63fcdSMarek Szyprowski 18704ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) 18714ce63fcdSMarek Szyprowski { 18724ce63fcdSMarek Szyprowski if (mapping) 18734ce63fcdSMarek Szyprowski kref_put(&mapping->kref, release_iommu_mapping); 18744ce63fcdSMarek Szyprowski } 187518177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); 18764ce63fcdSMarek Szyprowski 18774ce63fcdSMarek Szyprowski /** 18784ce63fcdSMarek Szyprowski * arm_iommu_attach_device 18794ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18804ce63fcdSMarek Szyprowski * @mapping: io address space mapping structure (returned from 18814ce63fcdSMarek Szyprowski * arm_iommu_create_mapping) 18824ce63fcdSMarek Szyprowski * 18834ce63fcdSMarek Szyprowski * Attaches specified io address space mapping to the provided device, 18844ce63fcdSMarek Szyprowski * this replaces the dma operations (dma_map_ops pointer) with the 18854ce63fcdSMarek Szyprowski * IOMMU aware version. More than one client might be attached to 18864ce63fcdSMarek Szyprowski * the same io address space mapping. 
18874ce63fcdSMarek Szyprowski */ 18884ce63fcdSMarek Szyprowski int arm_iommu_attach_device(struct device *dev, 18894ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping) 18904ce63fcdSMarek Szyprowski { 18914ce63fcdSMarek Szyprowski int err; 18924ce63fcdSMarek Szyprowski 18934ce63fcdSMarek Szyprowski err = iommu_attach_device(mapping->domain, dev); 18944ce63fcdSMarek Szyprowski if (err) 18954ce63fcdSMarek Szyprowski return err; 18964ce63fcdSMarek Szyprowski 18974ce63fcdSMarek Szyprowski kref_get(&mapping->kref); 18984ce63fcdSMarek Szyprowski dev->archdata.mapping = mapping; 18994ce63fcdSMarek Szyprowski set_dma_ops(dev, &iommu_ops); 19004ce63fcdSMarek Szyprowski 190175c59716SHiroshi Doyu pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); 19024ce63fcdSMarek Szyprowski return 0; 19034ce63fcdSMarek Szyprowski } 190418177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 19054ce63fcdSMarek Szyprowski 19066fe36758SHiroshi Doyu /** 19076fe36758SHiroshi Doyu * arm_iommu_detach_device 19086fe36758SHiroshi Doyu * @dev: valid struct device pointer 19096fe36758SHiroshi Doyu * 19106fe36758SHiroshi Doyu * Detaches the provided device from a previously attached map. 
19116fe36758SHiroshi Doyu * This voids the dma operations (dma_map_ops pointer) 19126fe36758SHiroshi Doyu */ 19136fe36758SHiroshi Doyu void arm_iommu_detach_device(struct device *dev) 19146fe36758SHiroshi Doyu { 19156fe36758SHiroshi Doyu struct dma_iommu_mapping *mapping; 19166fe36758SHiroshi Doyu 19176fe36758SHiroshi Doyu mapping = to_dma_iommu_mapping(dev); 19186fe36758SHiroshi Doyu if (!mapping) { 19196fe36758SHiroshi Doyu dev_warn(dev, "Not attached\n"); 19206fe36758SHiroshi Doyu return; 19216fe36758SHiroshi Doyu } 19226fe36758SHiroshi Doyu 19236fe36758SHiroshi Doyu iommu_detach_device(mapping->domain, dev); 19246fe36758SHiroshi Doyu kref_put(&mapping->kref, release_iommu_mapping); 19259e4b259dSWill Deacon dev->archdata.mapping = NULL; 19266fe36758SHiroshi Doyu set_dma_ops(dev, NULL); 19276fe36758SHiroshi Doyu 19286fe36758SHiroshi Doyu pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 19296fe36758SHiroshi Doyu } 193018177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 19316fe36758SHiroshi Doyu 19324ce63fcdSMarek Szyprowski #endif 1933