10ddbccd1SRussell King /* 20ddbccd1SRussell King * linux/arch/arm/mm/dma-mapping.c 30ddbccd1SRussell King * 40ddbccd1SRussell King * Copyright (C) 2000-2004 Russell King 50ddbccd1SRussell King * 60ddbccd1SRussell King * This program is free software; you can redistribute it and/or modify 70ddbccd1SRussell King * it under the terms of the GNU General Public License version 2 as 80ddbccd1SRussell King * published by the Free Software Foundation. 90ddbccd1SRussell King * 100ddbccd1SRussell King * DMA uncached mapping support. 110ddbccd1SRussell King */ 1211a5aa32SRussell King #include <linux/bootmem.h> 130ddbccd1SRussell King #include <linux/module.h> 140ddbccd1SRussell King #include <linux/mm.h> 155a0e3ad6STejun Heo #include <linux/gfp.h> 160ddbccd1SRussell King #include <linux/errno.h> 170ddbccd1SRussell King #include <linux/list.h> 180ddbccd1SRussell King #include <linux/init.h> 190ddbccd1SRussell King #include <linux/device.h> 200ddbccd1SRussell King #include <linux/dma-mapping.h> 21c7909509SMarek Szyprowski #include <linux/dma-contiguous.h> 2239af22a7SNicolas Pitre #include <linux/highmem.h> 23c7909509SMarek Szyprowski #include <linux/memblock.h> 2499d1717dSJon Medhurst #include <linux/slab.h> 254ce63fcdSMarek Szyprowski #include <linux/iommu.h> 26e9da6e99SMarek Szyprowski #include <linux/io.h> 274ce63fcdSMarek Szyprowski #include <linux/vmalloc.h> 28158e8bfeSAlessandro Rubini #include <linux/sizes.h> 290ddbccd1SRussell King 300ddbccd1SRussell King #include <asm/memory.h> 3143377453SNicolas Pitre #include <asm/highmem.h> 320ddbccd1SRussell King #include <asm/cacheflush.h> 330ddbccd1SRussell King #include <asm/tlbflush.h> 3499d1717dSJon Medhurst #include <asm/mach/arch.h> 354ce63fcdSMarek Szyprowski #include <asm/dma-iommu.h> 36c7909509SMarek Szyprowski #include <asm/mach/map.h> 37c7909509SMarek Szyprowski #include <asm/system_info.h> 38c7909509SMarek Szyprowski #include <asm/dma-contiguous.h> 390ddbccd1SRussell King 40022ae537SRussell King #include "mm.h" 41022ae537SRussell King 4215237e1fSMarek Szyprowski /* 4315237e1fSMarek Szyprowski * The DMA API is built upon the notion of "buffer ownership". A buffer 4415237e1fSMarek Szyprowski * is either exclusively owned by the CPU (and therefore may be accessed 4515237e1fSMarek Szyprowski * by it) or exclusively owned by the DMA device. These helper functions 4615237e1fSMarek Szyprowski * represent the transitions between these two ownership states. 4715237e1fSMarek Szyprowski * 4815237e1fSMarek Szyprowski * Note, however, that on later ARMs, this notion does not work due to 4915237e1fSMarek Szyprowski * speculative prefetches. We model our approach on the assumption that 5015237e1fSMarek Szyprowski * the CPU does do speculative prefetches, which means we clean caches 5115237e1fSMarek Szyprowski * before transfers and delay cache invalidation until transfer completion. 5215237e1fSMarek Szyprowski * 5315237e1fSMarek Szyprowski */ 5451fde349SMarek Szyprowski static void __dma_page_cpu_to_dev(struct page *, unsigned long, 5515237e1fSMarek Szyprowski size_t, enum dma_data_direction); 5651fde349SMarek Szyprowski static void __dma_page_dev_to_cpu(struct page *, unsigned long, 5715237e1fSMarek Szyprowski size_t, enum dma_data_direction); 5815237e1fSMarek Szyprowski 592dc6a016SMarek Szyprowski /** 602dc6a016SMarek Szyprowski * arm_dma_map_page - map a portion of a page for streaming DMA 612dc6a016SMarek Szyprowski * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 622dc6a016SMarek Szyprowski * @page: page that buffer resides in 632dc6a016SMarek Szyprowski * @offset: offset into page for start of buffer 642dc6a016SMarek Szyprowski * @size: size of buffer to map 652dc6a016SMarek Szyprowski * @dir: DMA transfer direction 662dc6a016SMarek Szyprowski * 672dc6a016SMarek Szyprowski * Ensure that any data held in the cache is appropriately discarded 682dc6a016SMarek Szyprowski * or written back. 692dc6a016SMarek Szyprowski * 702dc6a016SMarek Szyprowski * The device owns this memory once this call has completed. The CPU 712dc6a016SMarek Szyprowski * can regain ownership by calling dma_unmap_page(). 722dc6a016SMarek Szyprowski */ 7351fde349SMarek Szyprowski static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, 742dc6a016SMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 752dc6a016SMarek Szyprowski struct dma_attrs *attrs) 762dc6a016SMarek Szyprowski { 77dd37e940SRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 7851fde349SMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 7951fde349SMarek Szyprowski return pfn_to_dma(dev, page_to_pfn(page)) + offset; 802dc6a016SMarek Szyprowski } 812dc6a016SMarek Szyprowski 82dd37e940SRob Herring static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, 83dd37e940SRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 84dd37e940SRob Herring struct dma_attrs *attrs) 85dd37e940SRob Herring { 86dd37e940SRob Herring return pfn_to_dma(dev, page_to_pfn(page)) + offset; 87dd37e940SRob Herring } 88dd37e940SRob Herring 892dc6a016SMarek Szyprowski /** 902dc6a016SMarek Szyprowski * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page() 912dc6a016SMarek Szyprowski * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 922dc6a016SMarek Szyprowski * @handle: DMA address of buffer 932dc6a016SMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 942dc6a016SMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 952dc6a016SMarek Szyprowski * 962dc6a016SMarek Szyprowski * Unmap a page streaming mode DMA translation. The handle and size 972dc6a016SMarek Szyprowski * must match what was provided in the previous dma_map_page() call. 982dc6a016SMarek Szyprowski * All other usages are undefined. 992dc6a016SMarek Szyprowski * 1002dc6a016SMarek Szyprowski * After this call, reads by the CPU to the buffer are guaranteed to see 1012dc6a016SMarek Szyprowski * whatever the device wrote there. 1022dc6a016SMarek Szyprowski */ 10351fde349SMarek Szyprowski static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, 1042dc6a016SMarek Szyprowski size_t size, enum dma_data_direction dir, 1052dc6a016SMarek Szyprowski struct dma_attrs *attrs) 1062dc6a016SMarek Szyprowski { 107dd37e940SRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 10851fde349SMarek Szyprowski __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), 10951fde349SMarek Szyprowski handle & ~PAGE_MASK, size, dir); 1102dc6a016SMarek Szyprowski } 1112dc6a016SMarek Szyprowski 11251fde349SMarek Szyprowski static void arm_dma_sync_single_for_cpu(struct device *dev, 1132dc6a016SMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 1142dc6a016SMarek Szyprowski { 1152dc6a016SMarek Szyprowski unsigned int offset = handle & (PAGE_SIZE - 1); 1162dc6a016SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); 1172dc6a016SMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 1182dc6a016SMarek Szyprowski } 1192dc6a016SMarek Szyprowski 12051fde349SMarek Szyprowski static void arm_dma_sync_single_for_device(struct device *dev, 1212dc6a016SMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 1222dc6a016SMarek Szyprowski { 1232dc6a016SMarek Szyprowski unsigned int offset = handle & (PAGE_SIZE - 1); 1242dc6a016SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); 1252dc6a016SMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 1262dc6a016SMarek Szyprowski } 1272dc6a016SMarek Szyprowski 1282dc6a016SMarek Szyprowski struct dma_map_ops arm_dma_ops = { 129f99d6034SMarek Szyprowski .alloc = arm_dma_alloc, 130f99d6034SMarek Szyprowski .free = arm_dma_free, 131f99d6034SMarek Szyprowski .mmap = arm_dma_mmap, 132dc2832e1SMarek Szyprowski .get_sgtable = arm_dma_get_sgtable, 1332dc6a016SMarek Szyprowski .map_page = arm_dma_map_page, 1342dc6a016SMarek Szyprowski .unmap_page = arm_dma_unmap_page, 1352dc6a016SMarek Szyprowski .map_sg = arm_dma_map_sg, 1362dc6a016SMarek Szyprowski .unmap_sg = arm_dma_unmap_sg, 1372dc6a016SMarek Szyprowski .sync_single_for_cpu = arm_dma_sync_single_for_cpu, 1382dc6a016SMarek Szyprowski .sync_single_for_device = arm_dma_sync_single_for_device, 1392dc6a016SMarek Szyprowski .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, 1402dc6a016SMarek Szyprowski .sync_sg_for_device = arm_dma_sync_sg_for_device, 1412dc6a016SMarek Szyprowski .set_dma_mask = arm_dma_set_mask, 1422dc6a016SMarek Szyprowski }; 1432dc6a016SMarek Szyprowski EXPORT_SYMBOL(arm_dma_ops); 1442dc6a016SMarek Szyprowski 145dd37e940SRob Herring static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 146dd37e940SRob Herring dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); 147dd37e940SRob Herring static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, 148dd37e940SRob Herring dma_addr_t handle, struct dma_attrs *attrs); 149dd37e940SRob Herring 150dd37e940SRob Herring struct dma_map_ops arm_coherent_dma_ops = { 151dd37e940SRob Herring .alloc = arm_coherent_dma_alloc, 152dd37e940SRob Herring .free = arm_coherent_dma_free, 153dd37e940SRob Herring .mmap = arm_dma_mmap, 154dd37e940SRob Herring .get_sgtable = arm_dma_get_sgtable, 155dd37e940SRob Herring .map_page = arm_coherent_dma_map_page, 156dd37e940SRob Herring .map_sg = arm_dma_map_sg, 157dd37e940SRob Herring .set_dma_mask = arm_dma_set_mask, 158dd37e940SRob Herring }; 159dd37e940SRob Herring EXPORT_SYMBOL(arm_coherent_dma_ops); 160dd37e940SRob Herring 161ab6494f0SCatalin Marinas static u64 get_coherent_dma_mask(struct device *dev) 162ab6494f0SCatalin Marinas { 1634dcfa600SRussell King u64 mask = (u64)DMA_BIT_MASK(32); 1640ddbccd1SRussell King 165ab6494f0SCatalin Marinas if (dev) { 16611a5aa32SRussell King unsigned long max_dma_pfn; 16711a5aa32SRussell King 168ab6494f0SCatalin Marinas mask = dev->coherent_dma_mask; 169ab6494f0SCatalin Marinas 170ab6494f0SCatalin Marinas /* 171ab6494f0SCatalin Marinas * Sanity check the DMA mask - it must be non-zero, and 172ab6494f0SCatalin Marinas * must be able to be satisfied by a DMA allocation. 173ab6494f0SCatalin Marinas */ 174ab6494f0SCatalin Marinas if (mask == 0) { 175ab6494f0SCatalin Marinas dev_warn(dev, "coherent DMA mask is unset\n"); 176ab6494f0SCatalin Marinas return 0; 177ab6494f0SCatalin Marinas } 178ab6494f0SCatalin Marinas 17911a5aa32SRussell King max_dma_pfn = min(max_pfn, arm_dma_pfn_limit); 18011a5aa32SRussell King 1814dcfa600SRussell King /* 1824dcfa600SRussell King * If the mask allows for more memory than we can address, 1834dcfa600SRussell King * and we actually have that much memory, then fail the 1844dcfa600SRussell King * allocation. 1854dcfa600SRussell King */ 1864dcfa600SRussell King if (sizeof(mask) != sizeof(dma_addr_t) && 1874dcfa600SRussell King mask > (dma_addr_t)~0 && 18811a5aa32SRussell King dma_to_pfn(dev, ~0) > max_dma_pfn) { 1894dcfa600SRussell King dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", 1904dcfa600SRussell King mask); 1914dcfa600SRussell King dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n"); 1924dcfa600SRussell King return 0; 1934dcfa600SRussell King } 1944dcfa600SRussell King 1954dcfa600SRussell King /* 1964dcfa600SRussell King * Now check that the mask, when translated to a PFN, 1974dcfa600SRussell King * fits within the allowable addresses which we can 1984dcfa600SRussell King * allocate. 1994dcfa600SRussell King */ 20011a5aa32SRussell King if (dma_to_pfn(dev, mask) < max_dma_pfn) { 2014dcfa600SRussell King dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", 2024dcfa600SRussell King mask, 2034dcfa600SRussell King dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, 2044dcfa600SRussell King arm_dma_pfn_limit + 1); 205ab6494f0SCatalin Marinas return 0; 206ab6494f0SCatalin Marinas } 207ab6494f0SCatalin Marinas } 208ab6494f0SCatalin Marinas 209ab6494f0SCatalin Marinas return mask; 210ab6494f0SCatalin Marinas } 211ab6494f0SCatalin Marinas 212c7909509SMarek Szyprowski static void __dma_clear_buffer(struct page *page, size_t size) 213c7909509SMarek Szyprowski { 214c7909509SMarek Szyprowski /* 215c7909509SMarek Szyprowski * Ensure that the allocated pages are zeroed, and that any data 216c7909509SMarek Szyprowski * lurking in the kernel direct-mapped region is invalidated. 217c7909509SMarek Szyprowski */ 2189848e48fSMarek Szyprowski if (PageHighMem(page)) { 2199848e48fSMarek Szyprowski phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); 2209848e48fSMarek Szyprowski phys_addr_t end = base + size; 2219848e48fSMarek Szyprowski while (size > 0) { 2229848e48fSMarek Szyprowski void *ptr = kmap_atomic(page); 2239848e48fSMarek Szyprowski memset(ptr, 0, PAGE_SIZE); 2249848e48fSMarek Szyprowski dmac_flush_range(ptr, ptr + PAGE_SIZE); 2259848e48fSMarek Szyprowski kunmap_atomic(ptr); 2269848e48fSMarek Szyprowski page++; 2279848e48fSMarek Szyprowski size -= PAGE_SIZE; 2289848e48fSMarek Szyprowski } 2299848e48fSMarek Szyprowski outer_flush_range(base, end); 2309848e48fSMarek Szyprowski } else { 2319848e48fSMarek Szyprowski void *ptr = page_address(page); 232c7909509SMarek Szyprowski memset(ptr, 0, size); 233c7909509SMarek Szyprowski dmac_flush_range(ptr, ptr + size); 234c7909509SMarek Szyprowski outer_flush_range(__pa(ptr), __pa(ptr) + size); 235c7909509SMarek Szyprowski } 2364ce63fcdSMarek Szyprowski } 237c7909509SMarek Szyprowski 2387a9a32a9SRussell King /* 2397a9a32a9SRussell King * Allocate a DMA buffer for 'dev' of size 'size' using the 2407a9a32a9SRussell King * specified gfp mask. Note that 'size' must be page aligned. 2417a9a32a9SRussell King */ 2427a9a32a9SRussell King static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) 2437a9a32a9SRussell King { 2447a9a32a9SRussell King unsigned long order = get_order(size); 2457a9a32a9SRussell King struct page *page, *p, *e; 2467a9a32a9SRussell King 2477a9a32a9SRussell King page = alloc_pages(gfp, order); 2487a9a32a9SRussell King if (!page) 2497a9a32a9SRussell King return NULL; 2507a9a32a9SRussell King 2517a9a32a9SRussell King /* 2527a9a32a9SRussell King * Now split the huge page and free the excess pages 2537a9a32a9SRussell King */ 2547a9a32a9SRussell King split_page(page, order); 2557a9a32a9SRussell King for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) 2567a9a32a9SRussell King __free_page(p); 2577a9a32a9SRussell King 258c7909509SMarek Szyprowski __dma_clear_buffer(page, size); 2597a9a32a9SRussell King 2607a9a32a9SRussell King return page; 2617a9a32a9SRussell King } 2627a9a32a9SRussell King 2637a9a32a9SRussell King /* 2647a9a32a9SRussell King * Free a DMA buffer. 'size' must be page aligned. 2657a9a32a9SRussell King */ 2667a9a32a9SRussell King static void __dma_free_buffer(struct page *page, size_t size) 2677a9a32a9SRussell King { 2687a9a32a9SRussell King struct page *e = page + (size >> PAGE_SHIFT); 2697a9a32a9SRussell King 2707a9a32a9SRussell King while (page < e) { 2717a9a32a9SRussell King __free_page(page); 2727a9a32a9SRussell King page++; 2737a9a32a9SRussell King } 2747a9a32a9SRussell King } 2757a9a32a9SRussell King 276ab6494f0SCatalin Marinas #ifdef CONFIG_MMU 2770ddbccd1SRussell King #ifdef CONFIG_HUGETLB_PAGE 2781355e2a6SCatalin Marinas #warning ARM Coherent DMA allocator does not (yet) support huge TLB 2790ddbccd1SRussell King #endif 2800ddbccd1SRussell King 281c7909509SMarek Szyprowski static void *__alloc_from_contiguous(struct device *dev, size_t size, 2829848e48fSMarek Szyprowski pgprot_t prot, struct page **ret_page, 2839848e48fSMarek Szyprowski const void *caller); 284c7909509SMarek Szyprowski 285e9da6e99SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, 286e9da6e99SMarek Szyprowski pgprot_t prot, struct page **ret_page, 287e9da6e99SMarek Szyprowski const void *caller); 288e9da6e99SMarek Szyprowski 289e9da6e99SMarek Szyprowski static void * 290e9da6e99SMarek Szyprowski __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, 291e9da6e99SMarek Szyprowski const void *caller) 292e9da6e99SMarek Szyprowski { 293e9da6e99SMarek Szyprowski struct vm_struct *area; 294e9da6e99SMarek Szyprowski unsigned long addr; 295e9da6e99SMarek Szyprowski 296e9da6e99SMarek Szyprowski /* 297e9da6e99SMarek Szyprowski * DMA allocation can be mapped to user space, so lets 298e9da6e99SMarek Szyprowski * set VM_USERMAP flags too. 299e9da6e99SMarek Szyprowski */ 300e9da6e99SMarek Szyprowski area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, 301e9da6e99SMarek Szyprowski caller); 302e9da6e99SMarek Szyprowski if (!area) 303e9da6e99SMarek Szyprowski return NULL; 304e9da6e99SMarek Szyprowski addr = (unsigned long)area->addr; 305e9da6e99SMarek Szyprowski area->phys_addr = __pfn_to_phys(page_to_pfn(page)); 306e9da6e99SMarek Szyprowski 307e9da6e99SMarek Szyprowski if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) { 308e9da6e99SMarek Szyprowski vunmap((void *)addr); 309e9da6e99SMarek Szyprowski return NULL; 310e9da6e99SMarek Szyprowski } 311e9da6e99SMarek Szyprowski return (void *)addr; 312e9da6e99SMarek Szyprowski } 313e9da6e99SMarek Szyprowski 314e9da6e99SMarek Szyprowski static void __dma_free_remap(void *cpu_addr, size_t size) 315e9da6e99SMarek Szyprowski { 316e9da6e99SMarek Szyprowski unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP; 317e9da6e99SMarek Szyprowski struct vm_struct *area = find_vm_area(cpu_addr); 318e9da6e99SMarek Szyprowski if (!area || (area->flags & flags) != flags) { 319e9da6e99SMarek Szyprowski WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 320e9da6e99SMarek Szyprowski return; 321e9da6e99SMarek Szyprowski } 322e9da6e99SMarek Szyprowski unmap_kernel_range((unsigned long)cpu_addr, size); 323e9da6e99SMarek Szyprowski vunmap(cpu_addr); 324e9da6e99SMarek Szyprowski } 325e9da6e99SMarek Szyprowski 3266e5267aaSMarek Szyprowski #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K 3276e5267aaSMarek Szyprowski 328e9da6e99SMarek Szyprowski struct dma_pool { 329e9da6e99SMarek Szyprowski size_t size; 330e9da6e99SMarek Szyprowski spinlock_t lock; 331e9da6e99SMarek Szyprowski unsigned long *bitmap; 332e9da6e99SMarek Szyprowski unsigned long nr_pages; 333e9da6e99SMarek Szyprowski void *vaddr; 3346b3fe472SHiroshi Doyu struct page **pages; 335c7909509SMarek Szyprowski }; 336c7909509SMarek Szyprowski 337e9da6e99SMarek Szyprowski static struct dma_pool atomic_pool = { 3386e5267aaSMarek Szyprowski .size = DEFAULT_DMA_COHERENT_POOL_SIZE, 339e9da6e99SMarek Szyprowski }; 340c7909509SMarek Szyprowski 341c7909509SMarek Szyprowski static int __init early_coherent_pool(char *p) 342c7909509SMarek Szyprowski { 343e9da6e99SMarek Szyprowski atomic_pool.size = memparse(p, &p); 344c7909509SMarek Szyprowski return 0; 345c7909509SMarek Szyprowski } 346c7909509SMarek Szyprowski early_param("coherent_pool", early_coherent_pool); 347c7909509SMarek Szyprowski 3486e5267aaSMarek Szyprowski void __init init_dma_coherent_pool_size(unsigned long size) 3496e5267aaSMarek Szyprowski { 3506e5267aaSMarek Szyprowski /* 3516e5267aaSMarek Szyprowski * Catch any attempt to set the pool size too late. 3526e5267aaSMarek Szyprowski */ 3536e5267aaSMarek Szyprowski BUG_ON(atomic_pool.vaddr); 3546e5267aaSMarek Szyprowski 3556e5267aaSMarek Szyprowski /* 3566e5267aaSMarek Szyprowski * Set architecture specific coherent pool size only if 3576e5267aaSMarek Szyprowski * it has not been changed by kernel command line parameter. 3586e5267aaSMarek Szyprowski */ 3596e5267aaSMarek Szyprowski if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE) 3606e5267aaSMarek Szyprowski atomic_pool.size = size; 3616e5267aaSMarek Szyprowski } 3626e5267aaSMarek Szyprowski 363c7909509SMarek Szyprowski /* 364c7909509SMarek Szyprowski * Initialise the coherent pool for atomic allocations. 365c7909509SMarek Szyprowski */ 366e9da6e99SMarek Szyprowski static int __init atomic_pool_init(void) 367c7909509SMarek Szyprowski { 368e9da6e99SMarek Szyprowski struct dma_pool *pool = &atomic_pool; 369c7909509SMarek Szyprowski pgprot_t prot = pgprot_dmacoherent(pgprot_kernel); 3709d1400cfSMarek Szyprowski gfp_t gfp = GFP_KERNEL | GFP_DMA; 371e9da6e99SMarek Szyprowski unsigned long nr_pages = pool->size >> PAGE_SHIFT; 372e9da6e99SMarek Szyprowski unsigned long *bitmap; 373c7909509SMarek Szyprowski struct page *page; 3746b3fe472SHiroshi Doyu struct page **pages; 375c7909509SMarek Szyprowski void *ptr; 376e9da6e99SMarek Szyprowski int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); 377c7909509SMarek Szyprowski 378e9da6e99SMarek Szyprowski bitmap = kzalloc(bitmap_size, GFP_KERNEL); 379e9da6e99SMarek Szyprowski if (!bitmap) 380e9da6e99SMarek Szyprowski goto no_bitmap; 381c7909509SMarek Szyprowski 3826b3fe472SHiroshi Doyu pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); 3836b3fe472SHiroshi Doyu if (!pages) 3846b3fe472SHiroshi Doyu goto no_pages; 3856b3fe472SHiroshi Doyu 386f825c736SAneesh Kumar K.V if (IS_ENABLED(CONFIG_DMA_CMA)) 3879848e48fSMarek Szyprowski ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, 3889848e48fSMarek Szyprowski atomic_pool_init); 389e9da6e99SMarek Szyprowski else 3909d1400cfSMarek Szyprowski ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page, 3919d1400cfSMarek Szyprowski atomic_pool_init); 392c7909509SMarek Szyprowski if (ptr) { 3936b3fe472SHiroshi Doyu int i; 3946b3fe472SHiroshi Doyu 3956b3fe472SHiroshi Doyu for (i = 0; i < nr_pages; i++) 3966b3fe472SHiroshi Doyu pages[i] = page + i; 3976b3fe472SHiroshi Doyu 398e9da6e99SMarek Szyprowski spin_lock_init(&pool->lock); 399e9da6e99SMarek Szyprowski pool->vaddr = ptr; 4006b3fe472SHiroshi Doyu pool->pages = pages; 401e9da6e99SMarek Szyprowski pool->bitmap = bitmap; 402e9da6e99SMarek Szyprowski pool->nr_pages = nr_pages; 403e9da6e99SMarek Szyprowski pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", 404e9da6e99SMarek Szyprowski (unsigned)pool->size / 1024); 405c7909509SMarek Szyprowski return 0; 406c7909509SMarek Szyprowski } 407ec10665cSSachin Kamat 408ec10665cSSachin Kamat kfree(pages); 4096b3fe472SHiroshi Doyu no_pages: 410e9da6e99SMarek Szyprowski kfree(bitmap); 411e9da6e99SMarek Szyprowski no_bitmap: 412e9da6e99SMarek Szyprowski pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", 413e9da6e99SMarek Szyprowski (unsigned)pool->size / 1024); 414c7909509SMarek Szyprowski return -ENOMEM; 415c7909509SMarek Szyprowski } 416c7909509SMarek Szyprowski /* 417c7909509SMarek Szyprowski * CMA is activated by core_initcall, so we must be called after it. 418c7909509SMarek Szyprowski */ 419e9da6e99SMarek Szyprowski postcore_initcall(atomic_pool_init); 420c7909509SMarek Szyprowski 421c7909509SMarek Szyprowski struct dma_contig_early_reserve { 422c7909509SMarek Szyprowski phys_addr_t base; 423c7909509SMarek Szyprowski unsigned long size; 424c7909509SMarek Szyprowski }; 425c7909509SMarek Szyprowski 426c7909509SMarek Szyprowski static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata; 427c7909509SMarek Szyprowski 428c7909509SMarek Szyprowski static int dma_mmu_remap_num __initdata; 429c7909509SMarek Szyprowski 430c7909509SMarek Szyprowski void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) 431c7909509SMarek Szyprowski { 432c7909509SMarek Szyprowski dma_mmu_remap[dma_mmu_remap_num].base = base; 433c7909509SMarek Szyprowski dma_mmu_remap[dma_mmu_remap_num].size = size; 434c7909509SMarek Szyprowski dma_mmu_remap_num++; 435c7909509SMarek Szyprowski } 436c7909509SMarek Szyprowski 437c7909509SMarek Szyprowski void __init dma_contiguous_remap(void) 438c7909509SMarek Szyprowski { 439c7909509SMarek Szyprowski int i; 440c7909509SMarek Szyprowski for (i = 0; i < dma_mmu_remap_num; i++) { 441c7909509SMarek Szyprowski phys_addr_t start = dma_mmu_remap[i].base; 442c7909509SMarek Szyprowski phys_addr_t end = start + dma_mmu_remap[i].size; 443c7909509SMarek Szyprowski struct map_desc map; 444c7909509SMarek Szyprowski unsigned long addr; 445c7909509SMarek Szyprowski 446c7909509SMarek Szyprowski if (end > arm_lowmem_limit) 447c7909509SMarek Szyprowski end = arm_lowmem_limit; 448c7909509SMarek Szyprowski if (start >= end) 44939f78e70SChris Brand continue; 450c7909509SMarek Szyprowski 451c7909509SMarek Szyprowski map.pfn = __phys_to_pfn(start); 452c7909509SMarek Szyprowski map.virtual = __phys_to_virt(start); 453c7909509SMarek Szyprowski map.length = end - start; 454c7909509SMarek Szyprowski map.type = MT_MEMORY_DMA_READY; 455c7909509SMarek Szyprowski 456c7909509SMarek Szyprowski /* 457c7909509SMarek Szyprowski * Clear previous low-memory mapping 458c7909509SMarek Szyprowski */ 459c7909509SMarek Szyprowski for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); 46061f6c7a4SVitaly Andrianov addr += PMD_SIZE) 461c7909509SMarek Szyprowski pmd_clear(pmd_off_k(addr)); 462c7909509SMarek Szyprowski 463c7909509SMarek Szyprowski iotable_init(&map, 1); 464c7909509SMarek Szyprowski } 465c7909509SMarek Szyprowski } 466c7909509SMarek Szyprowski 467c7909509SMarek Szyprowski static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr, 468c7909509SMarek Szyprowski void *data) 469c7909509SMarek Szyprowski { 470c7909509SMarek Szyprowski struct page *page = virt_to_page(addr); 471c7909509SMarek Szyprowski pgprot_t prot = *(pgprot_t *)data; 472c7909509SMarek Szyprowski 473c7909509SMarek Szyprowski set_pte_ext(pte, mk_pte(page, prot), 0); 474c7909509SMarek Szyprowski return 0; 475c7909509SMarek Szyprowski } 476c7909509SMarek Szyprowski 477c7909509SMarek Szyprowski static void __dma_remap(struct page *page, size_t size, pgprot_t prot) 478c7909509SMarek Szyprowski { 479c7909509SMarek Szyprowski unsigned long start = (unsigned long) page_address(page); 480c7909509SMarek Szyprowski unsigned end = start + size; 481c7909509SMarek Szyprowski 482c7909509SMarek Szyprowski apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); 483c7909509SMarek Szyprowski flush_tlb_kernel_range(start, end); 484c7909509SMarek Szyprowski } 485c7909509SMarek Szyprowski 486c7909509SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, 487c7909509SMarek Szyprowski pgprot_t prot, struct page **ret_page, 488c7909509SMarek Szyprowski const void *caller) 489c7909509SMarek Szyprowski { 490c7909509SMarek Szyprowski struct page *page; 491c7909509SMarek Szyprowski void *ptr; 492c7909509SMarek Szyprowski page = __dma_alloc_buffer(dev, size, gfp); 493c7909509SMarek Szyprowski if (!page) 494c7909509SMarek Szyprowski return NULL; 495c7909509SMarek Szyprowski 496c7909509SMarek Szyprowski ptr = __dma_alloc_remap(page, size, gfp, prot, caller); 497c7909509SMarek Szyprowski if (!ptr) { 498c7909509SMarek Szyprowski __dma_free_buffer(page, size); 499c7909509SMarek Szyprowski return NULL; 500c7909509SMarek Szyprowski } 501c7909509SMarek Szyprowski 502c7909509SMarek Szyprowski *ret_page = page; 503c7909509SMarek Szyprowski return ptr; 504c7909509SMarek Szyprowski } 505c7909509SMarek Szyprowski 506e9da6e99SMarek Szyprowski static void *__alloc_from_pool(size_t size, struct page **ret_page) 507c7909509SMarek Szyprowski { 508e9da6e99SMarek Szyprowski struct dma_pool *pool = &atomic_pool; 509e9da6e99SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 510e9da6e99SMarek Szyprowski unsigned int pageno; 511e9da6e99SMarek Szyprowski unsigned long flags; 512e9da6e99SMarek Szyprowski void *ptr = NULL; 513e4ea6918SAaro Koskinen unsigned long align_mask; 514c7909509SMarek Szyprowski 515e9da6e99SMarek Szyprowski if (!pool->vaddr) { 516e9da6e99SMarek Szyprowski WARN(1, "coherent pool not initialised!\n"); 517c7909509SMarek Szyprowski return NULL; 518c7909509SMarek Szyprowski } 519c7909509SMarek Szyprowski 520c7909509SMarek Szyprowski /* 521c7909509SMarek Szyprowski * Align the region allocation - allocations from pool are rather 522c7909509SMarek Szyprowski * small, so align them to their order in pages, minimum is a page 523c7909509SMarek Szyprowski * size. This helps reduce fragmentation of the DMA space. 524c7909509SMarek Szyprowski */ 525e4ea6918SAaro Koskinen align_mask = (1 << get_order(size)) - 1; 526e9da6e99SMarek Szyprowski 527e9da6e99SMarek Szyprowski spin_lock_irqsave(&pool->lock, flags); 528e9da6e99SMarek Szyprowski pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, 529e4ea6918SAaro Koskinen 0, count, align_mask); 530e9da6e99SMarek Szyprowski if (pageno < pool->nr_pages) { 531e9da6e99SMarek Szyprowski bitmap_set(pool->bitmap, pageno, count); 532e9da6e99SMarek Szyprowski ptr = pool->vaddr + PAGE_SIZE * pageno; 5336b3fe472SHiroshi Doyu *ret_page = pool->pages[pageno]; 534fb71285fSMarek Szyprowski } else { 535fb71285fSMarek Szyprowski pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n" 536fb71285fSMarek Szyprowski "Please increase it with coherent_pool= kernel parameter!\n", 537fb71285fSMarek Szyprowski (unsigned)pool->size / 1024); 538e9da6e99SMarek Szyprowski } 539e9da6e99SMarek Szyprowski spin_unlock_irqrestore(&pool->lock, flags); 540e9da6e99SMarek Szyprowski 541c7909509SMarek Szyprowski return ptr; 542c7909509SMarek Szyprowski } 543c7909509SMarek Szyprowski 54421d0a759SHiroshi Doyu static bool __in_atomic_pool(void *start, size_t size) 54521d0a759SHiroshi Doyu { 54621d0a759SHiroshi Doyu struct dma_pool *pool = &atomic_pool; 54721d0a759SHiroshi Doyu void *end = start + size; 54821d0a759SHiroshi Doyu void *pool_start = pool->vaddr; 54921d0a759SHiroshi Doyu void *pool_end = pool->vaddr + pool->size; 55021d0a759SHiroshi Doyu 551f3d87524SThomas Petazzoni if (start < pool_start || start >= pool_end) 55221d0a759SHiroshi Doyu return false; 55321d0a759SHiroshi Doyu 55421d0a759SHiroshi Doyu if (end <= pool_end) 55521d0a759SHiroshi Doyu return true; 55621d0a759SHiroshi Doyu 55721d0a759SHiroshi Doyu WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n", 55821d0a759SHiroshi Doyu start, end - 1, pool_start, pool_end - 1); 55921d0a759SHiroshi Doyu 56021d0a759SHiroshi Doyu return false; 56121d0a759SHiroshi Doyu } 56221d0a759SHiroshi Doyu 563e9da6e99SMarek Szyprowski static int __free_from_pool(void *start, size_t size) 564c7909509SMarek Szyprowski { 565e9da6e99SMarek Szyprowski struct dma_pool *pool = &atomic_pool; 566e9da6e99SMarek Szyprowski unsigned long pageno, count; 567e9da6e99SMarek Szyprowski unsigned long flags; 568c7909509SMarek Szyprowski 56921d0a759SHiroshi Doyu if (!__in_atomic_pool(start, size)) 570c7909509SMarek Szyprowski return 0; 571c7909509SMarek Szyprowski 572e9da6e99SMarek Szyprowski pageno = (start - pool->vaddr) >> PAGE_SHIFT; 573e9da6e99SMarek Szyprowski count = size >> PAGE_SHIFT; 574e9da6e99SMarek Szyprowski 575e9da6e99SMarek Szyprowski spin_lock_irqsave(&pool->lock, flags); 576e9da6e99SMarek Szyprowski bitmap_clear(pool->bitmap, pageno, count); 577e9da6e99SMarek Szyprowski spin_unlock_irqrestore(&pool->lock, flags); 578e9da6e99SMarek Szyprowski 579c7909509SMarek Szyprowski return 1; 580c7909509SMarek Szyprowski } 581c7909509SMarek Szyprowski 582c7909509SMarek Szyprowski static void *__alloc_from_contiguous(struct device *dev, size_t size, 5839848e48fSMarek Szyprowski pgprot_t prot, struct page **ret_page, 5849848e48fSMarek Szyprowski const void *caller) 585c7909509SMarek Szyprowski { 586c7909509SMarek Szyprowski unsigned long order = get_order(size); 587c7909509SMarek Szyprowski size_t count = size >> PAGE_SHIFT; 588c7909509SMarek Szyprowski struct page *page; 5899848e48fSMarek Szyprowski void *ptr; 590c7909509SMarek Szyprowski 591c7909509SMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order); 592c7909509SMarek Szyprowski if (!page) 593c7909509SMarek Szyprowski return NULL; 594c7909509SMarek Szyprowski 595c7909509SMarek Szyprowski __dma_clear_buffer(page, size); 596c7909509SMarek Szyprowski 5979848e48fSMarek Szyprowski if (PageHighMem(page)) { 5989848e48fSMarek Szyprowski ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller); 5999848e48fSMarek Szyprowski if (!ptr) { 6009848e48fSMarek Szyprowski dma_release_from_contiguous(dev, page, count); 6019848e48fSMarek Szyprowski return NULL; 6029848e48fSMarek Szyprowski } 6039848e48fSMarek Szyprowski } else { 6049848e48fSMarek Szyprowski __dma_remap(page, size, prot); 6059848e48fSMarek Szyprowski ptr = page_address(page); 6069848e48fSMarek Szyprowski } 607c7909509SMarek Szyprowski *ret_page = page; 6089848e48fSMarek Szyprowski return ptr; 609c7909509SMarek Szyprowski } 610c7909509SMarek Szyprowski 611c7909509SMarek Szyprowski static void __free_from_contiguous(struct device *dev, struct page *page, 6129848e48fSMarek Szyprowski void *cpu_addr, size_t size) 613c7909509SMarek Szyprowski { 6149848e48fSMarek Szyprowski if (PageHighMem(page)) 6159848e48fSMarek Szyprowski __dma_free_remap(cpu_addr, size); 6169848e48fSMarek Szyprowski else 617c7909509SMarek Szyprowski __dma_remap(page, size, pgprot_kernel); 618c7909509SMarek Szyprowski dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); 619c7909509SMarek Szyprowski } 620c7909509SMarek Szyprowski 621f99d6034SMarek Szyprowski static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) 622f99d6034SMarek Szyprowski { 623f99d6034SMarek Szyprowski prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ? 624f99d6034SMarek Szyprowski pgprot_writecombine(prot) : 625f99d6034SMarek Szyprowski pgprot_dmacoherent(prot); 626f99d6034SMarek Szyprowski return prot; 627f99d6034SMarek Szyprowski } 628f99d6034SMarek Szyprowski 629c7909509SMarek Szyprowski #define nommu() 0 630c7909509SMarek Szyprowski 631ab6494f0SCatalin Marinas #else /* !CONFIG_MMU */ 632695ae0afSRussell King 633c7909509SMarek Szyprowski #define nommu() 1 634c7909509SMarek Szyprowski 635f99d6034SMarek Szyprowski #define __get_dma_pgprot(attrs, prot) __pgprot(0) 636c7909509SMarek Szyprowski #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL 637e9da6e99SMarek Szyprowski #define __alloc_from_pool(size, ret_page) NULL 6389848e48fSMarek Szyprowski #define __alloc_from_contiguous(dev, size, prot, ret, c) NULL 639c7909509SMarek Szyprowski #define __free_from_pool(cpu_addr, size) 0 6409848e48fSMarek Szyprowski #define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0) 641c7909509SMarek Szyprowski #define __dma_free_remap(cpu_addr, size) do { } while (0) 64231ebf944SRussell King 64331ebf944SRussell King #endif /* CONFIG_MMU */ 64431ebf944SRussell King 645c7909509SMarek Szyprowski static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, 646c7909509SMarek Szyprowski struct page **ret_page) 647ab6494f0SCatalin Marinas { 64804da5694SRussell King struct page *page; 649c7909509SMarek Szyprowski page = __dma_alloc_buffer(dev, size, gfp); 650c7909509SMarek Szyprowski if (!page) 651c7909509SMarek Szyprowski return NULL; 652c7909509SMarek Szyprowski 653c7909509SMarek Szyprowski *ret_page = page; 654c7909509SMarek Szyprowski return page_address(page); 655c7909509SMarek Szyprowski } 656c7909509SMarek Szyprowski 657c7909509SMarek Szyprowski 658c7909509SMarek Szyprowski 659c7909509SMarek Szyprowski static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 660dd37e940SRob Herring gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller) 661c7909509SMarek Szyprowski { 662c7909509SMarek Szyprowski u64 mask = get_coherent_dma_mask(dev); 6633dd7ea92SJingoo Han struct page *page = NULL; 66431ebf944SRussell King void *addr; 665ab6494f0SCatalin Marinas 666c7909509SMarek Szyprowski #ifdef CONFIG_DMA_API_DEBUG 667c7909509SMarek Szyprowski u64 limit = (mask + 1) & ~mask; 668c7909509SMarek Szyprowski if (limit && size >= limit) { 669c7909509SMarek Szyprowski dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", 670c7909509SMarek Szyprowski size, mask); 671c7909509SMarek Szyprowski return NULL; 672c7909509SMarek Szyprowski } 673c7909509SMarek Szyprowski #endif 674c7909509SMarek Szyprowski 675c7909509SMarek Szyprowski if (!mask) 676c7909509SMarek Szyprowski return NULL; 677c7909509SMarek Szyprowski 678c7909509SMarek Szyprowski if (mask < 0xffffffffULL) 679c7909509SMarek Szyprowski gfp |= GFP_DMA; 680c7909509SMarek Szyprowski 681ea2e7057SSumit Bhattacharya /* 682ea2e7057SSumit Bhattacharya * Following is a work-around (a.k.a. hack) to prevent pages 683ea2e7057SSumit Bhattacharya * with __GFP_COMP being passed to split_page() which cannot 684ea2e7057SSumit Bhattacharya * handle them. The real problem is that this flag probably 685ea2e7057SSumit Bhattacharya * should be 0 on ARM as it is not supported on this 686ea2e7057SSumit Bhattacharya * platform; see CONFIG_HUGETLBFS. 687ea2e7057SSumit Bhattacharya */ 688ea2e7057SSumit Bhattacharya gfp &= ~(__GFP_COMP); 689ea2e7057SSumit Bhattacharya 690553ac788SMarek Szyprowski *handle = DMA_ERROR_CODE; 69104da5694SRussell King size = PAGE_ALIGN(size); 69204da5694SRussell King 693dd37e940SRob Herring if (is_coherent || nommu()) 694c7909509SMarek Szyprowski addr = __alloc_simple_buffer(dev, size, gfp, &page); 695633dc92aSRussell King else if (!(gfp & __GFP_WAIT)) 696e9da6e99SMarek Szyprowski addr = __alloc_from_pool(size, &page); 697f825c736SAneesh Kumar K.V else if (!IS_ENABLED(CONFIG_DMA_CMA)) 698c7909509SMarek Szyprowski addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); 69931ebf944SRussell King else 7009848e48fSMarek Szyprowski addr = __alloc_from_contiguous(dev, size, prot, &page, caller); 70131ebf944SRussell King 70231ebf944SRussell King if (addr) 7039eedd963SRussell King *handle = pfn_to_dma(dev, page_to_pfn(page)); 70431ebf944SRussell King 70531ebf944SRussell King return addr; 706ab6494f0SCatalin Marinas } 707695ae0afSRussell King 7080ddbccd1SRussell King /* 7090ddbccd1SRussell King * Allocate DMA-coherent memory space and return both the kernel remapped 7100ddbccd1SRussell King * virtual and bus address for that space. 7110ddbccd1SRussell King */ 712f99d6034SMarek Szyprowski void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 713f99d6034SMarek Szyprowski gfp_t gfp, struct dma_attrs *attrs) 7140ddbccd1SRussell King { 7150ea1ec71SRussell King pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 7160ddbccd1SRussell King void *memory; 7170ddbccd1SRussell King 7180ddbccd1SRussell King if (dma_alloc_from_coherent(dev, size, handle, &memory)) 7190ddbccd1SRussell King return memory; 7200ddbccd1SRussell King 721dd37e940SRob Herring return __dma_alloc(dev, size, handle, gfp, prot, false, 722dd37e940SRob Herring __builtin_return_address(0)); 723dd37e940SRob Herring } 724dd37e940SRob Herring 725dd37e940SRob Herring static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 726dd37e940SRob Herring dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 727dd37e940SRob Herring { 7280ea1ec71SRussell King pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 729dd37e940SRob Herring void *memory; 730dd37e940SRob Herring 731dd37e940SRob Herring if (dma_alloc_from_coherent(dev, size, handle, &memory)) 732dd37e940SRob Herring return memory; 733dd37e940SRob Herring 734dd37e940SRob Herring return __dma_alloc(dev, size, handle, gfp, prot, true, 73545cd5290SRussell King __builtin_return_address(0)); 7360ddbccd1SRussell King } 7370ddbccd1SRussell King 7380ddbccd1SRussell King /* 739f99d6034SMarek Szyprowski * Create userspace mapping for the DMA-coherent memory. 7400ddbccd1SRussell King */ 741f99d6034SMarek Szyprowski int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 742f99d6034SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 743f99d6034SMarek Szyprowski struct dma_attrs *attrs) 7440ddbccd1SRussell King { 745ab6494f0SCatalin Marinas int ret = -ENXIO; 746ab6494f0SCatalin Marinas #ifdef CONFIG_MMU 74750262a4bSMarek Szyprowski unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 74850262a4bSMarek Szyprowski unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 749c7909509SMarek Szyprowski unsigned long pfn = dma_to_pfn(dev, dma_addr); 75050262a4bSMarek Szyprowski unsigned long off = vma->vm_pgoff; 75150262a4bSMarek Szyprowski 752f99d6034SMarek Szyprowski vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 753f99d6034SMarek Szyprowski 75447142f07SMarek Szyprowski if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) 75547142f07SMarek Szyprowski return ret; 75647142f07SMarek Szyprowski 75750262a4bSMarek Szyprowski if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { 7580ddbccd1SRussell King ret = remap_pfn_range(vma, vma->vm_start, 75950262a4bSMarek Szyprowski pfn + off, 760c7909509SMarek Szyprowski vma->vm_end - vma->vm_start, 7610ddbccd1SRussell King vma->vm_page_prot); 76250262a4bSMarek Szyprowski } 763ab6494f0SCatalin Marinas #endif /* CONFIG_MMU */ 7640ddbccd1SRussell King 7650ddbccd1SRussell King return ret; 7660ddbccd1SRussell King } 7670ddbccd1SRussell King 7680ddbccd1SRussell King /* 769c7909509SMarek Szyprowski * Free a buffer as defined by the above mapping. 7700ddbccd1SRussell King */ 771dd37e940SRob Herring static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 772dd37e940SRob Herring dma_addr_t handle, struct dma_attrs *attrs, 773dd37e940SRob Herring bool is_coherent) 7740ddbccd1SRussell King { 775c7909509SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); 7760ddbccd1SRussell King 7770ddbccd1SRussell King if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) 7780ddbccd1SRussell King return; 7790ddbccd1SRussell King 7803e82d012SRussell King size = PAGE_ALIGN(size); 7813e82d012SRussell King 782dd37e940SRob Herring if (is_coherent || nommu()) { 783c7909509SMarek Szyprowski __dma_free_buffer(page, size); 784d9e0d149SAaro Koskinen } else if (__free_from_pool(cpu_addr, size)) { 785d9e0d149SAaro Koskinen return; 786f825c736SAneesh Kumar K.V } else if (!IS_ENABLED(CONFIG_DMA_CMA)) { 787695ae0afSRussell King __dma_free_remap(cpu_addr, size); 788c7909509SMarek Szyprowski __dma_free_buffer(page, size); 789c7909509SMarek Szyprowski } else { 790c7909509SMarek Szyprowski /* 791c7909509SMarek Szyprowski * Non-atomic allocations cannot be freed with IRQs disabled 792c7909509SMarek Szyprowski */ 793c7909509SMarek Szyprowski WARN_ON(irqs_disabled()); 7949848e48fSMarek Szyprowski __free_from_contiguous(dev, page, cpu_addr, size); 795c7909509SMarek Szyprowski } 7960ddbccd1SRussell King } 797afd1a321SRussell King 798dd37e940SRob Herring void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 799dd37e940SRob Herring dma_addr_t handle, struct dma_attrs *attrs) 800dd37e940SRob Herring { 801dd37e940SRob Herring __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); 802dd37e940SRob Herring } 803dd37e940SRob Herring 804dd37e940SRob Herring static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, 805dd37e940SRob Herring dma_addr_t handle, struct dma_attrs *attrs) 806dd37e940SRob Herring { 807dd37e940SRob Herring __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); 808dd37e940SRob Herring } 809dd37e940SRob Herring 810dc2832e1SMarek Szyprowski int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, 811dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t handle, size_t size, 812dc2832e1SMarek Szyprowski struct dma_attrs *attrs) 813dc2832e1SMarek Szyprowski { 814dc2832e1SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); 815dc2832e1SMarek Szyprowski int ret; 816dc2832e1SMarek Szyprowski 817dc2832e1SMarek Szyprowski ret = sg_alloc_table(sgt, 1, GFP_KERNEL); 818dc2832e1SMarek Szyprowski if (unlikely(ret)) 819dc2832e1SMarek Szyprowski return ret; 820dc2832e1SMarek Szyprowski 821dc2832e1SMarek Szyprowski sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); 822dc2832e1SMarek Szyprowski return 0; 823dc2832e1SMarek Szyprowski } 824dc2832e1SMarek Szyprowski 82565af191aSRussell King static void dma_cache_maint_page(struct page *page, unsigned long offset, 826a9c9147eSRussell King size_t size, enum dma_data_direction dir, 827a9c9147eSRussell King void (*op)(const void *, size_t, int)) 82865af191aSRussell King { 82915653371SRussell King unsigned long pfn; 83015653371SRussell King size_t left = size; 83115653371SRussell King 83215653371SRussell King pfn = page_to_pfn(page) + offset / PAGE_SIZE; 83315653371SRussell King offset %= PAGE_SIZE; 83415653371SRussell King 83565af191aSRussell King /* 83665af191aSRussell King * A single sg entry may refer to multiple physically contiguous 83765af191aSRussell King * pages. But we still need to process highmem pages individually. 83865af191aSRussell King * If highmem is not configured then the bulk of this loop gets 83965af191aSRussell King * optimized out. 84065af191aSRussell King */ 84165af191aSRussell King do { 84265af191aSRussell King size_t len = left; 84393f1d629SRussell King void *vaddr; 84493f1d629SRussell King 84515653371SRussell King page = pfn_to_page(pfn); 84615653371SRussell King 84793f1d629SRussell King if (PageHighMem(page)) { 84815653371SRussell King if (len + offset > PAGE_SIZE) 84965af191aSRussell King len = PAGE_SIZE - offset; 850dd0f67f4SJoonsoo Kim 851dd0f67f4SJoonsoo Kim if (cache_is_vipt_nonaliasing()) { 85239af22a7SNicolas Pitre vaddr = kmap_atomic(page); 8537e5a69e8SNicolas Pitre op(vaddr + offset, len, dir); 85439af22a7SNicolas Pitre kunmap_atomic(vaddr); 855dd0f67f4SJoonsoo Kim } else { 856dd0f67f4SJoonsoo Kim vaddr = kmap_high_get(page); 857dd0f67f4SJoonsoo Kim if (vaddr) { 858dd0f67f4SJoonsoo Kim op(vaddr + offset, len, dir); 859dd0f67f4SJoonsoo Kim kunmap_high(page); 860dd0f67f4SJoonsoo Kim } 86193f1d629SRussell King } 86293f1d629SRussell King } else { 86393f1d629SRussell King vaddr = page_address(page) + offset; 864a9c9147eSRussell King op(vaddr, len, dir); 86593f1d629SRussell King } 86665af191aSRussell King offset = 0; 86715653371SRussell King pfn++; 86865af191aSRussell King left -= len; 86965af191aSRussell King } while (left); 87065af191aSRussell King } 87165af191aSRussell King 87251fde349SMarek Szyprowski /* 87351fde349SMarek Szyprowski * Make an area consistent for devices. 87451fde349SMarek Szyprowski * Note: Drivers should NOT use this function directly, as it will break 87551fde349SMarek Szyprowski * platforms with CONFIG_DMABOUNCE. 87651fde349SMarek Szyprowski * Use the driver DMA support - see dma-mapping.h (dma_sync_*) 87751fde349SMarek Szyprowski */ 87851fde349SMarek Szyprowski static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, 87965af191aSRussell King size_t size, enum dma_data_direction dir) 88065af191aSRussell King { 88143377453SNicolas Pitre unsigned long paddr; 88243377453SNicolas Pitre 883a9c9147eSRussell King dma_cache_maint_page(page, off, size, dir, dmac_map_area); 88443377453SNicolas Pitre 88565af191aSRussell King paddr = page_to_phys(page) + off; 8862ffe2da3SRussell King if (dir == DMA_FROM_DEVICE) { 8872ffe2da3SRussell King outer_inv_range(paddr, paddr + size); 8882ffe2da3SRussell King } else { 8892ffe2da3SRussell King outer_clean_range(paddr, paddr + size); 8902ffe2da3SRussell King } 8912ffe2da3SRussell King /* FIXME: non-speculating: flush on bidirectional mappings? */ 89243377453SNicolas Pitre } 8934ea0d737SRussell King 89451fde349SMarek Szyprowski static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, 8954ea0d737SRussell King size_t size, enum dma_data_direction dir) 8964ea0d737SRussell King { 8972ffe2da3SRussell King unsigned long paddr = page_to_phys(page) + off; 8982ffe2da3SRussell King 8992ffe2da3SRussell King /* FIXME: non-speculating: not required */ 9002ffe2da3SRussell King /* don't bother invalidating if DMA to device */ 9012ffe2da3SRussell King if (dir != DMA_TO_DEVICE) 9022ffe2da3SRussell King outer_inv_range(paddr, paddr + size); 9032ffe2da3SRussell King 904a9c9147eSRussell King dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); 905c0177800SCatalin Marinas 906c0177800SCatalin Marinas /* 907b2a234edSMing Lei * Mark the D-cache clean for these pages to avoid extra flushing. 908c0177800SCatalin Marinas */ 909b2a234edSMing Lei if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { 910b2a234edSMing Lei unsigned long pfn; 911b2a234edSMing Lei size_t left = size; 912b2a234edSMing Lei 913b2a234edSMing Lei pfn = page_to_pfn(page) + off / PAGE_SIZE; 914b2a234edSMing Lei off %= PAGE_SIZE; 915b2a234edSMing Lei if (off) { 916b2a234edSMing Lei pfn++; 917b2a234edSMing Lei left -= PAGE_SIZE - off; 918b2a234edSMing Lei } 919b2a234edSMing Lei while (left >= PAGE_SIZE) { 920b2a234edSMing Lei page = pfn_to_page(pfn++); 921c0177800SCatalin Marinas set_bit(PG_dcache_clean, &page->flags); 922b2a234edSMing Lei left -= PAGE_SIZE; 923b2a234edSMing Lei } 924b2a234edSMing Lei } 9254ea0d737SRussell King } 92643377453SNicolas Pitre 927afd1a321SRussell King /** 9282a550e73SMarek Szyprowski * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA 929afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 930afd1a321SRussell King * @sg: list of buffers 931afd1a321SRussell King * @nents: number of buffers to map 932afd1a321SRussell King * @dir: DMA transfer direction 933afd1a321SRussell King * 934afd1a321SRussell King * Map a set of buffers described by scatterlist in streaming mode for DMA. 935afd1a321SRussell King * This is the scatter-gather version of the dma_map_single interface. 936afd1a321SRussell King * Here the scatter gather list elements are each tagged with the 937afd1a321SRussell King * appropriate dma address and length. They are obtained via 938afd1a321SRussell King * sg_dma_{address,length}. 939afd1a321SRussell King * 940afd1a321SRussell King * Device ownership issues as mentioned for dma_map_single are the same 941afd1a321SRussell King * here. 942afd1a321SRussell King */ 9432dc6a016SMarek Szyprowski int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 9442dc6a016SMarek Szyprowski enum dma_data_direction dir, struct dma_attrs *attrs) 945afd1a321SRussell King { 9462a550e73SMarek Szyprowski struct dma_map_ops *ops = get_dma_ops(dev); 947afd1a321SRussell King struct scatterlist *s; 94801135d92SRussell King int i, j; 949afd1a321SRussell King 950afd1a321SRussell King for_each_sg(sg, s, nents, i) { 9514ce63fcdSMarek Szyprowski #ifdef CONFIG_NEED_SG_DMA_LENGTH 9524ce63fcdSMarek Szyprowski s->dma_length = s->length; 9534ce63fcdSMarek Szyprowski #endif 9542a550e73SMarek Szyprowski s->dma_address = ops->map_page(dev, sg_page(s), s->offset, 9552a550e73SMarek Szyprowski s->length, dir, attrs); 95601135d92SRussell King if (dma_mapping_error(dev, s->dma_address)) 95701135d92SRussell King goto bad_mapping; 958afd1a321SRussell King } 959afd1a321SRussell King return nents; 96001135d92SRussell King 96101135d92SRussell King bad_mapping: 96201135d92SRussell King for_each_sg(sg, s, i, j) 9632a550e73SMarek Szyprowski ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); 96401135d92SRussell King return 0; 965afd1a321SRussell King } 966afd1a321SRussell King 967afd1a321SRussell King /** 9682a550e73SMarek Szyprowski * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 969afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 970afd1a321SRussell King * @sg: list of buffers 9710adfca6fSLinus Walleij * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 972afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 973afd1a321SRussell King * 974afd1a321SRussell King * Unmap a set of streaming mode DMA translations. Again, CPU access 975afd1a321SRussell King * rules concerning calls here are the same as for dma_unmap_single(). 976afd1a321SRussell King */ 9772dc6a016SMarek Szyprowski void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 9782dc6a016SMarek Szyprowski enum dma_data_direction dir, struct dma_attrs *attrs) 979afd1a321SRussell King { 9802a550e73SMarek Szyprowski struct dma_map_ops *ops = get_dma_ops(dev); 98101135d92SRussell King struct scatterlist *s; 98201135d92SRussell King 98301135d92SRussell King int i; 98424056f52SRussell King 98501135d92SRussell King for_each_sg(sg, s, nents, i) 9862a550e73SMarek Szyprowski ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); 987afd1a321SRussell King } 988afd1a321SRussell King 989afd1a321SRussell King /** 9902a550e73SMarek Szyprowski * arm_dma_sync_sg_for_cpu 991afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 992afd1a321SRussell King * @sg: list of buffers 993afd1a321SRussell King * @nents: number of buffers to map (returned from dma_map_sg) 994afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 995afd1a321SRussell King */ 9962dc6a016SMarek Szyprowski void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 997afd1a321SRussell King int nents, enum dma_data_direction dir) 998afd1a321SRussell King { 9992a550e73SMarek Szyprowski struct dma_map_ops *ops = get_dma_ops(dev); 1000afd1a321SRussell King struct scatterlist *s; 1001afd1a321SRussell King int i; 1002afd1a321SRussell King 10032a550e73SMarek Szyprowski for_each_sg(sg, s, nents, i) 10042a550e73SMarek Szyprowski ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, 10052a550e73SMarek Szyprowski dir); 1006afd1a321SRussell King } 100724056f52SRussell King 1008afd1a321SRussell King /** 10092a550e73SMarek Szyprowski * arm_dma_sync_sg_for_device 1010afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1011afd1a321SRussell King * @sg: list of buffers 1012afd1a321SRussell King * @nents: number of buffers to map (returned from dma_map_sg) 1013afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 1014afd1a321SRussell King */ 10152dc6a016SMarek Szyprowski void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 1016afd1a321SRussell King int nents, enum dma_data_direction dir) 1017afd1a321SRussell King { 10182a550e73SMarek Szyprowski struct dma_map_ops *ops = get_dma_ops(dev); 1019afd1a321SRussell King struct scatterlist *s; 1020afd1a321SRussell King int i; 1021afd1a321SRussell King 10222a550e73SMarek Szyprowski for_each_sg(sg, s, nents, i) 10232a550e73SMarek Szyprowski ops->sync_single_for_device(dev, sg_dma_address(s), s->length, 10242a550e73SMarek Szyprowski dir); 1025afd1a321SRussell King } 102624056f52SRussell King 1027022ae537SRussell King /* 1028022ae537SRussell King * Return whether the given device DMA address mask can be supported 1029022ae537SRussell King * properly. For example, if your device can only drive the low 24-bits 1030022ae537SRussell King * during bus mastering, then you would pass 0x00ffffff as the mask 1031022ae537SRussell King * to this function. 1032022ae537SRussell King */ 1033022ae537SRussell King int dma_supported(struct device *dev, u64 mask) 1034022ae537SRussell King { 10354dcfa600SRussell King unsigned long limit; 10364dcfa600SRussell King 10374dcfa600SRussell King /* 10384dcfa600SRussell King * If the mask allows for more memory than we can address, 10394dcfa600SRussell King * and we actually have that much memory, then we must 10404dcfa600SRussell King * indicate that DMA to this device is not supported. 10414dcfa600SRussell King */ 10424dcfa600SRussell King if (sizeof(mask) != sizeof(dma_addr_t) && 10434dcfa600SRussell King mask > (dma_addr_t)~0 && 10444dcfa600SRussell King dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) 1045022ae537SRussell King return 0; 10464dcfa600SRussell King 10474dcfa600SRussell King /* 10484dcfa600SRussell King * Translate the device's DMA mask to a PFN limit. This 10494dcfa600SRussell King * PFN number includes the page which we can DMA to. 10504dcfa600SRussell King */ 10514dcfa600SRussell King limit = dma_to_pfn(dev, mask); 10524dcfa600SRussell King 10534dcfa600SRussell King if (limit < arm_dma_pfn_limit) 10544dcfa600SRussell King return 0; 10554dcfa600SRussell King 1056022ae537SRussell King return 1; 1057022ae537SRussell King } 1058022ae537SRussell King EXPORT_SYMBOL(dma_supported); 1059022ae537SRussell King 106087b54e78SGregory CLEMENT int arm_dma_set_mask(struct device *dev, u64 dma_mask) 1061022ae537SRussell King { 1062022ae537SRussell King if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 1063022ae537SRussell King return -EIO; 1064022ae537SRussell King 1065022ae537SRussell King *dev->dma_mask = dma_mask; 1066022ae537SRussell King 1067022ae537SRussell King return 0; 1068022ae537SRussell King } 1069022ae537SRussell King 107024056f52SRussell King #define PREALLOC_DMA_DEBUG_ENTRIES 4096 107124056f52SRussell King 107224056f52SRussell King static int __init dma_debug_do_init(void) 107324056f52SRussell King { 107424056f52SRussell King dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 107524056f52SRussell King return 0; 107624056f52SRussell King } 107724056f52SRussell King fs_initcall(dma_debug_do_init); 10784ce63fcdSMarek Szyprowski 10794ce63fcdSMarek Szyprowski #ifdef CONFIG_ARM_DMA_USE_IOMMU 10804ce63fcdSMarek Szyprowski 10814ce63fcdSMarek Szyprowski /* IOMMU */ 10824ce63fcdSMarek Szyprowski 10834ce63fcdSMarek Szyprowski static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, 10844ce63fcdSMarek Szyprowski size_t size) 10854ce63fcdSMarek Szyprowski { 10864ce63fcdSMarek Szyprowski unsigned int order = get_order(size); 10874ce63fcdSMarek Szyprowski unsigned int align = 0; 10884ce63fcdSMarek Szyprowski unsigned int count, start; 10894ce63fcdSMarek Szyprowski unsigned long flags; 10904ce63fcdSMarek Szyprowski 109160460abfSSeung-Woo Kim if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT) 109260460abfSSeung-Woo Kim order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT; 109360460abfSSeung-Woo Kim 10944ce63fcdSMarek Szyprowski count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) + 10954ce63fcdSMarek Szyprowski (1 << mapping->order) - 1) >> mapping->order; 10964ce63fcdSMarek Szyprowski 10974ce63fcdSMarek Szyprowski if (order > mapping->order) 10984ce63fcdSMarek Szyprowski align = (1 << (order - mapping->order)) - 1; 10994ce63fcdSMarek Szyprowski 11004ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 11014ce63fcdSMarek Szyprowski start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0, 11024ce63fcdSMarek Szyprowski count, align); 11034ce63fcdSMarek Szyprowski if (start > mapping->bits) { 11044ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 11054ce63fcdSMarek Szyprowski return DMA_ERROR_CODE; 11064ce63fcdSMarek Szyprowski } 11074ce63fcdSMarek Szyprowski 11084ce63fcdSMarek Szyprowski bitmap_set(mapping->bitmap, start, count); 11094ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 11104ce63fcdSMarek Szyprowski 11114ce63fcdSMarek Szyprowski return mapping->base + (start << (mapping->order + PAGE_SHIFT)); 11124ce63fcdSMarek Szyprowski } 11134ce63fcdSMarek Szyprowski 11144ce63fcdSMarek Szyprowski static inline void __free_iova(struct dma_iommu_mapping *mapping, 11154ce63fcdSMarek Szyprowski dma_addr_t addr, size_t size) 11164ce63fcdSMarek Szyprowski { 11174ce63fcdSMarek Szyprowski unsigned int start = (addr - mapping->base) >> 11184ce63fcdSMarek Szyprowski (mapping->order + PAGE_SHIFT); 11194ce63fcdSMarek Szyprowski unsigned int count = ((size >> PAGE_SHIFT) + 11204ce63fcdSMarek Szyprowski (1 << mapping->order) - 1) >> mapping->order; 11214ce63fcdSMarek Szyprowski unsigned long flags; 11224ce63fcdSMarek Szyprowski 11234ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 11244ce63fcdSMarek Szyprowski bitmap_clear(mapping->bitmap, start, count); 11254ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 11264ce63fcdSMarek Szyprowski } 11274ce63fcdSMarek Szyprowski 1128549a17e4SMarek Szyprowski static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, 1129549a17e4SMarek Szyprowski gfp_t gfp, struct dma_attrs *attrs) 11304ce63fcdSMarek Szyprowski { 11314ce63fcdSMarek Szyprowski struct page **pages; 11324ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 11334ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *); 11344ce63fcdSMarek Szyprowski int i = 0; 11354ce63fcdSMarek Szyprowski 11364ce63fcdSMarek Szyprowski if (array_size <= PAGE_SIZE) 11374ce63fcdSMarek Szyprowski pages = kzalloc(array_size, gfp); 11384ce63fcdSMarek Szyprowski else 11394ce63fcdSMarek Szyprowski pages = vzalloc(array_size); 11404ce63fcdSMarek Szyprowski if (!pages) 11414ce63fcdSMarek Szyprowski return NULL; 11424ce63fcdSMarek Szyprowski 1143549a17e4SMarek Szyprowski if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) 1144549a17e4SMarek Szyprowski { 1145549a17e4SMarek Szyprowski unsigned long order = get_order(size); 1146549a17e4SMarek Szyprowski struct page *page; 1147549a17e4SMarek Szyprowski 1148549a17e4SMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order); 1149549a17e4SMarek Szyprowski if (!page) 1150549a17e4SMarek Szyprowski goto error; 1151549a17e4SMarek Szyprowski 1152549a17e4SMarek Szyprowski __dma_clear_buffer(page, size); 1153549a17e4SMarek Szyprowski 1154549a17e4SMarek Szyprowski for (i = 0; i < count; i++) 1155549a17e4SMarek Szyprowski pages[i] = page + i; 1156549a17e4SMarek Szyprowski 1157549a17e4SMarek Szyprowski return pages; 1158549a17e4SMarek Szyprowski } 1159549a17e4SMarek Szyprowski 1160f8669befSMarek Szyprowski /* 1161f8669befSMarek Szyprowski * IOMMU can map any pages, so himem can also be used here 1162f8669befSMarek Szyprowski */ 1163f8669befSMarek Szyprowski gfp |= __GFP_NOWARN | __GFP_HIGHMEM; 1164f8669befSMarek Szyprowski 11654ce63fcdSMarek Szyprowski while (count) { 1166593f4735SMarek Szyprowski int j, order = __fls(count); 11674ce63fcdSMarek Szyprowski 1168f8669befSMarek Szyprowski pages[i] = alloc_pages(gfp, order); 11694ce63fcdSMarek Szyprowski while (!pages[i] && order) 1170f8669befSMarek Szyprowski pages[i] = alloc_pages(gfp, --order); 11714ce63fcdSMarek Szyprowski if (!pages[i]) 11724ce63fcdSMarek Szyprowski goto error; 11734ce63fcdSMarek Szyprowski 11745a796eebSHiroshi Doyu if (order) { 11754ce63fcdSMarek Szyprowski split_page(pages[i], order); 11764ce63fcdSMarek Szyprowski j = 1 << order; 11774ce63fcdSMarek Szyprowski while (--j) 11784ce63fcdSMarek Szyprowski pages[i + j] = pages[i] + j; 11795a796eebSHiroshi Doyu } 11804ce63fcdSMarek Szyprowski 11814ce63fcdSMarek Szyprowski __dma_clear_buffer(pages[i], PAGE_SIZE << order); 11824ce63fcdSMarek Szyprowski i += 1 << order; 11834ce63fcdSMarek Szyprowski count -= 1 << order; 11844ce63fcdSMarek Szyprowski } 11854ce63fcdSMarek Szyprowski 11864ce63fcdSMarek Szyprowski return pages; 11874ce63fcdSMarek Szyprowski error: 11889fa8af91SMarek Szyprowski while (i--) 11894ce63fcdSMarek Szyprowski if (pages[i]) 11904ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 119146c87852SPrathyush K if (array_size <= PAGE_SIZE) 11924ce63fcdSMarek Szyprowski kfree(pages); 11934ce63fcdSMarek Szyprowski else 11944ce63fcdSMarek Szyprowski vfree(pages); 11954ce63fcdSMarek Szyprowski return NULL; 11964ce63fcdSMarek Szyprowski } 11974ce63fcdSMarek Szyprowski 1198549a17e4SMarek Szyprowski static int __iommu_free_buffer(struct device *dev, struct page **pages, 1199549a17e4SMarek Szyprowski size_t size, struct dma_attrs *attrs) 12004ce63fcdSMarek Szyprowski { 12014ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 12024ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *); 12034ce63fcdSMarek Szyprowski int i; 1204549a17e4SMarek Szyprowski 1205549a17e4SMarek Szyprowski if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { 1206549a17e4SMarek Szyprowski dma_release_from_contiguous(dev, pages[0], count); 1207549a17e4SMarek Szyprowski } else { 12084ce63fcdSMarek Szyprowski for (i = 0; i < count; i++) 12094ce63fcdSMarek Szyprowski if (pages[i]) 12104ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 1211549a17e4SMarek Szyprowski } 1212549a17e4SMarek Szyprowski 121346c87852SPrathyush K if (array_size <= PAGE_SIZE) 12144ce63fcdSMarek Szyprowski kfree(pages); 12154ce63fcdSMarek Szyprowski else 12164ce63fcdSMarek Szyprowski vfree(pages); 12174ce63fcdSMarek Szyprowski return 0; 12184ce63fcdSMarek Szyprowski } 12194ce63fcdSMarek Szyprowski 12204ce63fcdSMarek Szyprowski /* 12214ce63fcdSMarek Szyprowski * Create a CPU mapping for a specified pages 12224ce63fcdSMarek Szyprowski */ 12234ce63fcdSMarek Szyprowski static void * 1224e9da6e99SMarek Szyprowski __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, 1225e9da6e99SMarek Szyprowski const void *caller) 12264ce63fcdSMarek Szyprowski { 1227e9da6e99SMarek Szyprowski unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 1228e9da6e99SMarek Szyprowski struct vm_struct *area; 1229e9da6e99SMarek Szyprowski unsigned long p; 12304ce63fcdSMarek Szyprowski 1231e9da6e99SMarek Szyprowski area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, 1232e9da6e99SMarek Szyprowski caller); 1233e9da6e99SMarek Szyprowski if (!area) 12344ce63fcdSMarek Szyprowski return NULL; 1235e9da6e99SMarek Szyprowski 1236e9da6e99SMarek Szyprowski area->pages = pages; 1237e9da6e99SMarek Szyprowski area->nr_pages = nr_pages; 1238e9da6e99SMarek Szyprowski p = (unsigned long)area->addr; 1239e9da6e99SMarek Szyprowski 1240e9da6e99SMarek Szyprowski for (i = 0; i < nr_pages; i++) { 1241e9da6e99SMarek Szyprowski phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i])); 1242e9da6e99SMarek Szyprowski if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot)) 1243e9da6e99SMarek Szyprowski goto err; 1244e9da6e99SMarek Szyprowski p += PAGE_SIZE; 12454ce63fcdSMarek Szyprowski } 1246e9da6e99SMarek Szyprowski return area->addr; 1247e9da6e99SMarek Szyprowski err: 1248e9da6e99SMarek Szyprowski unmap_kernel_range((unsigned long)area->addr, size); 1249e9da6e99SMarek Szyprowski vunmap(area->addr); 12504ce63fcdSMarek Szyprowski return NULL; 12514ce63fcdSMarek Szyprowski } 12524ce63fcdSMarek Szyprowski 12534ce63fcdSMarek Szyprowski /* 12544ce63fcdSMarek Szyprowski * Create a mapping in device IO address space for specified pages 12554ce63fcdSMarek Szyprowski */ 12564ce63fcdSMarek Szyprowski static dma_addr_t 12574ce63fcdSMarek Szyprowski __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) 12584ce63fcdSMarek Szyprowski { 12594ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 12604ce63fcdSMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 12614ce63fcdSMarek Szyprowski dma_addr_t dma_addr, iova; 12624ce63fcdSMarek Szyprowski int i, ret = DMA_ERROR_CODE; 12634ce63fcdSMarek Szyprowski 12644ce63fcdSMarek Szyprowski dma_addr = __alloc_iova(mapping, size); 12654ce63fcdSMarek Szyprowski if (dma_addr == DMA_ERROR_CODE) 12664ce63fcdSMarek Szyprowski return dma_addr; 12674ce63fcdSMarek Szyprowski 12684ce63fcdSMarek Szyprowski iova = dma_addr; 12694ce63fcdSMarek Szyprowski for (i = 0; i < count; ) { 12704ce63fcdSMarek Szyprowski unsigned int next_pfn = page_to_pfn(pages[i]) + 1; 12714ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(pages[i]); 12724ce63fcdSMarek Szyprowski unsigned int len, j; 12734ce63fcdSMarek Szyprowski 12744ce63fcdSMarek Szyprowski for (j = i + 1; j < count; j++, next_pfn++) 12754ce63fcdSMarek Szyprowski if (page_to_pfn(pages[j]) != next_pfn) 12764ce63fcdSMarek Szyprowski break; 12774ce63fcdSMarek Szyprowski 12784ce63fcdSMarek Szyprowski len = (j - i) << PAGE_SHIFT; 1279c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, 1280c9b24996SAndreas Herrmann IOMMU_READ|IOMMU_WRITE); 12814ce63fcdSMarek Szyprowski if (ret < 0) 12824ce63fcdSMarek Szyprowski goto fail; 12834ce63fcdSMarek Szyprowski iova += len; 12844ce63fcdSMarek Szyprowski i = j; 12854ce63fcdSMarek Szyprowski } 12864ce63fcdSMarek Szyprowski return dma_addr; 12874ce63fcdSMarek Szyprowski fail: 12884ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); 12894ce63fcdSMarek Szyprowski __free_iova(mapping, dma_addr, size); 12904ce63fcdSMarek Szyprowski return DMA_ERROR_CODE; 12914ce63fcdSMarek Szyprowski } 12924ce63fcdSMarek Szyprowski 12934ce63fcdSMarek Szyprowski static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) 12944ce63fcdSMarek Szyprowski { 12954ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 12964ce63fcdSMarek Szyprowski 12974ce63fcdSMarek Szyprowski /* 12984ce63fcdSMarek Szyprowski * add optional in-page offset from iova to size and align 12994ce63fcdSMarek Szyprowski * result to page size 13004ce63fcdSMarek Szyprowski */ 13014ce63fcdSMarek Szyprowski size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); 13024ce63fcdSMarek Szyprowski iova &= PAGE_MASK; 13034ce63fcdSMarek Szyprowski 13044ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, size); 13054ce63fcdSMarek Szyprowski __free_iova(mapping, iova, size); 13064ce63fcdSMarek Szyprowski return 0; 13074ce63fcdSMarek Szyprowski } 13084ce63fcdSMarek Szyprowski 1309665bad7bSHiroshi Doyu static struct page **__atomic_get_pages(void *addr) 1310665bad7bSHiroshi Doyu { 1311665bad7bSHiroshi Doyu struct dma_pool *pool = &atomic_pool; 1312665bad7bSHiroshi Doyu struct page **pages = pool->pages; 1313665bad7bSHiroshi Doyu int offs = (addr - pool->vaddr) >> PAGE_SHIFT; 1314665bad7bSHiroshi Doyu 1315665bad7bSHiroshi Doyu return pages + offs; 1316665bad7bSHiroshi Doyu } 1317665bad7bSHiroshi Doyu 1318955c757eSMarek Szyprowski static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) 1319e9da6e99SMarek Szyprowski { 1320e9da6e99SMarek Szyprowski struct vm_struct *area; 1321e9da6e99SMarek Szyprowski 1322665bad7bSHiroshi Doyu if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) 1323665bad7bSHiroshi Doyu return __atomic_get_pages(cpu_addr); 1324665bad7bSHiroshi Doyu 1325955c757eSMarek Szyprowski if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1326955c757eSMarek Szyprowski return cpu_addr; 1327955c757eSMarek Szyprowski 1328e9da6e99SMarek Szyprowski area = find_vm_area(cpu_addr); 1329e9da6e99SMarek Szyprowski if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) 1330e9da6e99SMarek Szyprowski return area->pages; 1331e9da6e99SMarek Szyprowski return NULL; 1332e9da6e99SMarek Szyprowski } 1333e9da6e99SMarek Szyprowski 1334479ed93aSHiroshi Doyu static void *__iommu_alloc_atomic(struct device *dev, size_t size, 1335479ed93aSHiroshi Doyu dma_addr_t *handle) 1336479ed93aSHiroshi Doyu { 1337479ed93aSHiroshi Doyu struct page *page; 1338479ed93aSHiroshi Doyu void *addr; 1339479ed93aSHiroshi Doyu 1340479ed93aSHiroshi Doyu addr = __alloc_from_pool(size, &page); 1341479ed93aSHiroshi Doyu if (!addr) 1342479ed93aSHiroshi Doyu return NULL; 1343479ed93aSHiroshi Doyu 1344479ed93aSHiroshi Doyu *handle = __iommu_create_mapping(dev, &page, size); 1345479ed93aSHiroshi Doyu if (*handle == DMA_ERROR_CODE) 1346479ed93aSHiroshi Doyu goto err_mapping; 1347479ed93aSHiroshi Doyu 1348479ed93aSHiroshi Doyu return addr; 1349479ed93aSHiroshi Doyu 1350479ed93aSHiroshi Doyu err_mapping: 1351479ed93aSHiroshi Doyu __free_from_pool(addr, size); 1352479ed93aSHiroshi Doyu return NULL; 1353479ed93aSHiroshi Doyu } 1354479ed93aSHiroshi Doyu 1355d5898291SMarek Szyprowski static void __iommu_free_atomic(struct device *dev, void *cpu_addr, 1356479ed93aSHiroshi Doyu dma_addr_t handle, size_t size) 1357479ed93aSHiroshi Doyu { 1358479ed93aSHiroshi Doyu __iommu_remove_mapping(dev, handle, size); 1359d5898291SMarek Szyprowski __free_from_pool(cpu_addr, size); 1360479ed93aSHiroshi Doyu } 1361479ed93aSHiroshi Doyu 13624ce63fcdSMarek Szyprowski static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, 13634ce63fcdSMarek Szyprowski dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 13644ce63fcdSMarek Szyprowski { 13654ce63fcdSMarek Szyprowski pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); 13664ce63fcdSMarek Szyprowski struct page **pages; 13674ce63fcdSMarek Szyprowski void *addr = NULL; 13684ce63fcdSMarek Szyprowski 13694ce63fcdSMarek Szyprowski *handle = DMA_ERROR_CODE; 13704ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 13714ce63fcdSMarek Szyprowski 1372479ed93aSHiroshi Doyu if (gfp & GFP_ATOMIC) 1373479ed93aSHiroshi Doyu return __iommu_alloc_atomic(dev, size, handle); 1374479ed93aSHiroshi Doyu 13755b91a98cSRichard Zhao /* 13765b91a98cSRichard Zhao * Following is a work-around (a.k.a. hack) to prevent pages 13775b91a98cSRichard Zhao * with __GFP_COMP being passed to split_page() which cannot 13785b91a98cSRichard Zhao * handle them. The real problem is that this flag probably 13795b91a98cSRichard Zhao * should be 0 on ARM as it is not supported on this 13805b91a98cSRichard Zhao * platform; see CONFIG_HUGETLBFS. 13815b91a98cSRichard Zhao */ 13825b91a98cSRichard Zhao gfp &= ~(__GFP_COMP); 13835b91a98cSRichard Zhao 1384549a17e4SMarek Szyprowski pages = __iommu_alloc_buffer(dev, size, gfp, attrs); 13854ce63fcdSMarek Szyprowski if (!pages) 13864ce63fcdSMarek Szyprowski return NULL; 13874ce63fcdSMarek Szyprowski 13884ce63fcdSMarek Szyprowski *handle = __iommu_create_mapping(dev, pages, size); 13894ce63fcdSMarek Szyprowski if (*handle == DMA_ERROR_CODE) 13904ce63fcdSMarek Szyprowski goto err_buffer; 13914ce63fcdSMarek Szyprowski 1392955c757eSMarek Szyprowski if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1393955c757eSMarek Szyprowski return pages; 1394955c757eSMarek Szyprowski 1395e9da6e99SMarek Szyprowski addr = __iommu_alloc_remap(pages, size, gfp, prot, 1396e9da6e99SMarek Szyprowski __builtin_return_address(0)); 13974ce63fcdSMarek Szyprowski if (!addr) 13984ce63fcdSMarek Szyprowski goto err_mapping; 13994ce63fcdSMarek Szyprowski 14004ce63fcdSMarek Szyprowski return addr; 14014ce63fcdSMarek Szyprowski 14024ce63fcdSMarek Szyprowski err_mapping: 14034ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, *handle, size); 14044ce63fcdSMarek Szyprowski err_buffer: 1405549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 14064ce63fcdSMarek Szyprowski return NULL; 14074ce63fcdSMarek Szyprowski } 14084ce63fcdSMarek Szyprowski 14094ce63fcdSMarek Szyprowski static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 14104ce63fcdSMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 14114ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 14124ce63fcdSMarek Szyprowski { 14134ce63fcdSMarek Szyprowski unsigned long uaddr = vma->vm_start; 14144ce63fcdSMarek Szyprowski unsigned long usize = vma->vm_end - vma->vm_start; 1415955c757eSMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1416e9da6e99SMarek Szyprowski 1417e9da6e99SMarek Szyprowski vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 1418e9da6e99SMarek Szyprowski 1419e9da6e99SMarek Szyprowski if (!pages) 1420e9da6e99SMarek Szyprowski return -ENXIO; 14214ce63fcdSMarek Szyprowski 14224ce63fcdSMarek Szyprowski do { 1423e9da6e99SMarek Szyprowski int ret = vm_insert_page(vma, uaddr, *pages++); 14244ce63fcdSMarek Szyprowski if (ret) { 1425e9da6e99SMarek Szyprowski pr_err("Remapping memory failed: %d\n", ret); 14264ce63fcdSMarek Szyprowski return ret; 14274ce63fcdSMarek Szyprowski } 14284ce63fcdSMarek Szyprowski uaddr += PAGE_SIZE; 14294ce63fcdSMarek Szyprowski usize -= PAGE_SIZE; 14304ce63fcdSMarek Szyprowski } while (usize > 0); 1431e9da6e99SMarek Szyprowski 14324ce63fcdSMarek Szyprowski return 0; 14334ce63fcdSMarek Szyprowski } 14344ce63fcdSMarek Szyprowski 14354ce63fcdSMarek Szyprowski /* 14364ce63fcdSMarek Szyprowski * free a page as defined by the above mapping. 14374ce63fcdSMarek Szyprowski * Must not be called with IRQs disabled. 14384ce63fcdSMarek Szyprowski */ 14394ce63fcdSMarek Szyprowski void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 14404ce63fcdSMarek Szyprowski dma_addr_t handle, struct dma_attrs *attrs) 14414ce63fcdSMarek Szyprowski { 1442836bfa0dSYoungJun Cho struct page **pages; 14434ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 14444ce63fcdSMarek Szyprowski 1445479ed93aSHiroshi Doyu if (__in_atomic_pool(cpu_addr, size)) { 1446d5898291SMarek Szyprowski __iommu_free_atomic(dev, cpu_addr, handle, size); 1447479ed93aSHiroshi Doyu return; 1448479ed93aSHiroshi Doyu } 1449479ed93aSHiroshi Doyu 1450836bfa0dSYoungJun Cho pages = __iommu_get_pages(cpu_addr, attrs); 1451836bfa0dSYoungJun Cho if (!pages) { 1452836bfa0dSYoungJun Cho WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 1453836bfa0dSYoungJun Cho return; 1454836bfa0dSYoungJun Cho } 1455836bfa0dSYoungJun Cho 1456955c757eSMarek Szyprowski if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { 1457e9da6e99SMarek Szyprowski unmap_kernel_range((unsigned long)cpu_addr, size); 1458e9da6e99SMarek Szyprowski vunmap(cpu_addr); 1459955c757eSMarek Szyprowski } 1460e9da6e99SMarek Szyprowski 14614ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, handle, size); 1462549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 14634ce63fcdSMarek Szyprowski } 14644ce63fcdSMarek Szyprowski 1465dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, 1466dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, 1467dc2832e1SMarek Szyprowski size_t size, struct dma_attrs *attrs) 1468dc2832e1SMarek Szyprowski { 1469dc2832e1SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1470dc2832e1SMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1471dc2832e1SMarek Szyprowski 1472dc2832e1SMarek Szyprowski if (!pages) 1473dc2832e1SMarek Szyprowski return -ENXIO; 1474dc2832e1SMarek Szyprowski 1475dc2832e1SMarek Szyprowski return sg_alloc_table_from_pages(sgt, pages, count, 0, size, 1476dc2832e1SMarek Szyprowski GFP_KERNEL); 14774ce63fcdSMarek Szyprowski } 14784ce63fcdSMarek Szyprowski 1479c9b24996SAndreas Herrmann static int __dma_direction_to_prot(enum dma_data_direction dir) 1480c9b24996SAndreas Herrmann { 1481c9b24996SAndreas Herrmann int prot; 1482c9b24996SAndreas Herrmann 1483c9b24996SAndreas Herrmann switch (dir) { 1484c9b24996SAndreas Herrmann case DMA_BIDIRECTIONAL: 1485c9b24996SAndreas Herrmann prot = IOMMU_READ | IOMMU_WRITE; 1486c9b24996SAndreas Herrmann break; 1487c9b24996SAndreas Herrmann case DMA_TO_DEVICE: 1488c9b24996SAndreas Herrmann prot = IOMMU_READ; 1489c9b24996SAndreas Herrmann break; 1490c9b24996SAndreas Herrmann case DMA_FROM_DEVICE: 1491c9b24996SAndreas Herrmann prot = IOMMU_WRITE; 1492c9b24996SAndreas Herrmann break; 1493c9b24996SAndreas Herrmann default: 1494c9b24996SAndreas Herrmann prot = 0; 1495c9b24996SAndreas Herrmann } 1496c9b24996SAndreas Herrmann 1497c9b24996SAndreas Herrmann return prot; 1498c9b24996SAndreas Herrmann } 1499c9b24996SAndreas Herrmann 15004ce63fcdSMarek Szyprowski /* 15014ce63fcdSMarek Szyprowski * Map a part of the scatter-gather list into contiguous io address space 15024ce63fcdSMarek Szyprowski */ 15034ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 15044ce63fcdSMarek Szyprowski size_t size, dma_addr_t *handle, 15050fa478dfSRob Herring enum dma_data_direction dir, struct dma_attrs *attrs, 15060fa478dfSRob Herring bool is_coherent) 15074ce63fcdSMarek Szyprowski { 15084ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 15094ce63fcdSMarek Szyprowski dma_addr_t iova, iova_base; 15104ce63fcdSMarek Szyprowski int ret = 0; 15114ce63fcdSMarek Szyprowski unsigned int count; 15124ce63fcdSMarek Szyprowski struct scatterlist *s; 1513c9b24996SAndreas Herrmann int prot; 15144ce63fcdSMarek Szyprowski 15154ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 15164ce63fcdSMarek Szyprowski *handle = DMA_ERROR_CODE; 15174ce63fcdSMarek Szyprowski 15184ce63fcdSMarek Szyprowski iova_base = iova = __alloc_iova(mapping, size); 15194ce63fcdSMarek Szyprowski if (iova == DMA_ERROR_CODE) 15204ce63fcdSMarek Szyprowski return -ENOMEM; 15214ce63fcdSMarek Szyprowski 15224ce63fcdSMarek Szyprowski for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 15234ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(sg_page(s)); 15244ce63fcdSMarek Szyprowski unsigned int len = PAGE_ALIGN(s->offset + s->length); 15254ce63fcdSMarek Szyprowski 15260fa478dfSRob Herring if (!is_coherent && 152797ef952aSMarek Szyprowski !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 15284ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 15294ce63fcdSMarek Szyprowski 1530c9b24996SAndreas Herrmann prot = __dma_direction_to_prot(dir); 1531c9b24996SAndreas Herrmann 1532c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, prot); 15334ce63fcdSMarek Szyprowski if (ret < 0) 15344ce63fcdSMarek Szyprowski goto fail; 15354ce63fcdSMarek Szyprowski count += len >> PAGE_SHIFT; 15364ce63fcdSMarek Szyprowski iova += len; 15374ce63fcdSMarek Szyprowski } 15384ce63fcdSMarek Szyprowski *handle = iova_base; 15394ce63fcdSMarek Szyprowski 15404ce63fcdSMarek Szyprowski return 0; 15414ce63fcdSMarek Szyprowski fail: 15424ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); 15434ce63fcdSMarek Szyprowski __free_iova(mapping, iova_base, size); 15444ce63fcdSMarek Szyprowski return ret; 15454ce63fcdSMarek Szyprowski } 15464ce63fcdSMarek Szyprowski 15470fa478dfSRob Herring static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, 15480fa478dfSRob Herring enum dma_data_direction dir, struct dma_attrs *attrs, 15490fa478dfSRob Herring bool is_coherent) 15504ce63fcdSMarek Szyprowski { 15514ce63fcdSMarek Szyprowski struct scatterlist *s = sg, *dma = sg, *start = sg; 15524ce63fcdSMarek Szyprowski int i, count = 0; 15534ce63fcdSMarek Szyprowski unsigned int offset = s->offset; 15544ce63fcdSMarek Szyprowski unsigned int size = s->offset + s->length; 15554ce63fcdSMarek Szyprowski unsigned int max = dma_get_max_seg_size(dev); 15564ce63fcdSMarek Szyprowski 15574ce63fcdSMarek Szyprowski for (i = 1; i < nents; i++) { 15584ce63fcdSMarek Szyprowski s = sg_next(s); 15594ce63fcdSMarek Szyprowski 15604ce63fcdSMarek Szyprowski s->dma_address = DMA_ERROR_CODE; 15614ce63fcdSMarek Szyprowski s->dma_length = 0; 15624ce63fcdSMarek Szyprowski 15634ce63fcdSMarek Szyprowski if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 15644ce63fcdSMarek Szyprowski if (__map_sg_chunk(dev, start, size, &dma->dma_address, 15650fa478dfSRob Herring dir, attrs, is_coherent) < 0) 15664ce63fcdSMarek Szyprowski goto bad_mapping; 15674ce63fcdSMarek Szyprowski 15684ce63fcdSMarek Szyprowski dma->dma_address += offset; 15694ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 15704ce63fcdSMarek Szyprowski 15714ce63fcdSMarek Szyprowski size = offset = s->offset; 15724ce63fcdSMarek Szyprowski start = s; 15734ce63fcdSMarek Szyprowski dma = sg_next(dma); 15744ce63fcdSMarek Szyprowski count += 1; 15754ce63fcdSMarek Szyprowski } 15764ce63fcdSMarek Szyprowski size += s->length; 15774ce63fcdSMarek Szyprowski } 15780fa478dfSRob Herring if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, 15790fa478dfSRob Herring is_coherent) < 0) 15804ce63fcdSMarek Szyprowski goto bad_mapping; 15814ce63fcdSMarek Szyprowski 15824ce63fcdSMarek Szyprowski dma->dma_address += offset; 15834ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 15844ce63fcdSMarek Szyprowski 15854ce63fcdSMarek Szyprowski return count+1; 15864ce63fcdSMarek Szyprowski 15874ce63fcdSMarek Szyprowski bad_mapping: 15884ce63fcdSMarek Szyprowski for_each_sg(sg, s, count, i) 15894ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 15904ce63fcdSMarek Szyprowski return 0; 15914ce63fcdSMarek Szyprowski } 15924ce63fcdSMarek Szyprowski 15934ce63fcdSMarek Szyprowski /** 15940fa478dfSRob Herring * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA 15950fa478dfSRob Herring * @dev: valid struct device pointer 15960fa478dfSRob Herring * @sg: list of buffers 15970fa478dfSRob Herring * @nents: number of buffers to map 15980fa478dfSRob Herring * @dir: DMA transfer direction 15990fa478dfSRob Herring * 16000fa478dfSRob Herring * Map a set of i/o coherent buffers described by scatterlist in streaming 16010fa478dfSRob Herring * mode for DMA. The scatter gather list elements are merged together (if 16020fa478dfSRob Herring * possible) and tagged with the appropriate dma address and length. They are 16030fa478dfSRob Herring * obtained via sg_dma_{address,length}. 16040fa478dfSRob Herring */ 16050fa478dfSRob Herring int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, 16060fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 16070fa478dfSRob Herring { 16080fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, true); 16090fa478dfSRob Herring } 16100fa478dfSRob Herring 16110fa478dfSRob Herring /** 16120fa478dfSRob Herring * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA 16130fa478dfSRob Herring * @dev: valid struct device pointer 16140fa478dfSRob Herring * @sg: list of buffers 16150fa478dfSRob Herring * @nents: number of buffers to map 16160fa478dfSRob Herring * @dir: DMA transfer direction 16170fa478dfSRob Herring * 16180fa478dfSRob Herring * Map a set of buffers described by scatterlist in streaming mode for DMA. 16190fa478dfSRob Herring * The scatter gather list elements are merged together (if possible) and 16200fa478dfSRob Herring * tagged with the appropriate dma address and length. They are obtained via 16210fa478dfSRob Herring * sg_dma_{address,length}. 16220fa478dfSRob Herring */ 16230fa478dfSRob Herring int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, 16240fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 16250fa478dfSRob Herring { 16260fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, false); 16270fa478dfSRob Herring } 16280fa478dfSRob Herring 16290fa478dfSRob Herring static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 16300fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs, 16310fa478dfSRob Herring bool is_coherent) 16320fa478dfSRob Herring { 16330fa478dfSRob Herring struct scatterlist *s; 16340fa478dfSRob Herring int i; 16350fa478dfSRob Herring 16360fa478dfSRob Herring for_each_sg(sg, s, nents, i) { 16370fa478dfSRob Herring if (sg_dma_len(s)) 16380fa478dfSRob Herring __iommu_remove_mapping(dev, sg_dma_address(s), 16390fa478dfSRob Herring sg_dma_len(s)); 16400fa478dfSRob Herring if (!is_coherent && 16410fa478dfSRob Herring !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 16420fa478dfSRob Herring __dma_page_dev_to_cpu(sg_page(s), s->offset, 16430fa478dfSRob Herring s->length, dir); 16440fa478dfSRob Herring } 16450fa478dfSRob Herring } 16460fa478dfSRob Herring 16470fa478dfSRob Herring /** 16480fa478dfSRob Herring * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 16490fa478dfSRob Herring * @dev: valid struct device pointer 16500fa478dfSRob Herring * @sg: list of buffers 16510fa478dfSRob Herring * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 16520fa478dfSRob Herring * @dir: DMA transfer direction (same as was passed to dma_map_sg) 16530fa478dfSRob Herring * 16540fa478dfSRob Herring * Unmap a set of streaming mode DMA translations. Again, CPU access 16550fa478dfSRob Herring * rules concerning calls here are the same as for dma_unmap_single(). 16560fa478dfSRob Herring */ 16570fa478dfSRob Herring void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 16580fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 16590fa478dfSRob Herring { 16600fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); 16610fa478dfSRob Herring } 16620fa478dfSRob Herring 16630fa478dfSRob Herring /** 16644ce63fcdSMarek Szyprowski * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 16654ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 16664ce63fcdSMarek Szyprowski * @sg: list of buffers 16674ce63fcdSMarek Szyprowski * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 16684ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 16694ce63fcdSMarek Szyprowski * 16704ce63fcdSMarek Szyprowski * Unmap a set of streaming mode DMA translations. Again, CPU access 16714ce63fcdSMarek Szyprowski * rules concerning calls here are the same as for dma_unmap_single(). 16724ce63fcdSMarek Szyprowski */ 16734ce63fcdSMarek Szyprowski void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 16744ce63fcdSMarek Szyprowski enum dma_data_direction dir, struct dma_attrs *attrs) 16754ce63fcdSMarek Szyprowski { 16760fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); 16774ce63fcdSMarek Szyprowski } 16784ce63fcdSMarek Szyprowski 16794ce63fcdSMarek Szyprowski /** 16804ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_cpu 16814ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 16824ce63fcdSMarek Szyprowski * @sg: list of buffers 16834ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 16844ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 16854ce63fcdSMarek Szyprowski */ 16864ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 16874ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 16884ce63fcdSMarek Szyprowski { 16894ce63fcdSMarek Szyprowski struct scatterlist *s; 16904ce63fcdSMarek Szyprowski int i; 16914ce63fcdSMarek Szyprowski 16924ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 16934ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); 16944ce63fcdSMarek Szyprowski 16954ce63fcdSMarek Szyprowski } 16964ce63fcdSMarek Szyprowski 16974ce63fcdSMarek Szyprowski /** 16984ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_device 16994ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 17004ce63fcdSMarek Szyprowski * @sg: list of buffers 17014ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 17024ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17034ce63fcdSMarek Szyprowski */ 17044ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 17054ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 17064ce63fcdSMarek Szyprowski { 17074ce63fcdSMarek Szyprowski struct scatterlist *s; 17084ce63fcdSMarek Szyprowski int i; 17094ce63fcdSMarek Szyprowski 17104ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 17114ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 17124ce63fcdSMarek Szyprowski } 17134ce63fcdSMarek Szyprowski 17144ce63fcdSMarek Szyprowski 17154ce63fcdSMarek Szyprowski /** 17160fa478dfSRob Herring * arm_coherent_iommu_map_page 17170fa478dfSRob Herring * @dev: valid struct device pointer 17180fa478dfSRob Herring * @page: page that buffer resides in 17190fa478dfSRob Herring * @offset: offset into page for start of buffer 17200fa478dfSRob Herring * @size: size of buffer to map 17210fa478dfSRob Herring * @dir: DMA transfer direction 17220fa478dfSRob Herring * 17230fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_map_page() 17240fa478dfSRob Herring */ 17250fa478dfSRob Herring static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, 17260fa478dfSRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 17270fa478dfSRob Herring struct dma_attrs *attrs) 17280fa478dfSRob Herring { 17290fa478dfSRob Herring struct dma_iommu_mapping *mapping = dev->archdata.mapping; 17300fa478dfSRob Herring dma_addr_t dma_addr; 173113987d68SWill Deacon int ret, prot, len = PAGE_ALIGN(size + offset); 17320fa478dfSRob Herring 17330fa478dfSRob Herring dma_addr = __alloc_iova(mapping, len); 17340fa478dfSRob Herring if (dma_addr == DMA_ERROR_CODE) 17350fa478dfSRob Herring return dma_addr; 17360fa478dfSRob Herring 1737c9b24996SAndreas Herrmann prot = __dma_direction_to_prot(dir); 173813987d68SWill Deacon 173913987d68SWill Deacon ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); 17400fa478dfSRob Herring if (ret < 0) 17410fa478dfSRob Herring goto fail; 17420fa478dfSRob Herring 17430fa478dfSRob Herring return dma_addr + offset; 17440fa478dfSRob Herring fail: 17450fa478dfSRob Herring __free_iova(mapping, dma_addr, len); 17460fa478dfSRob Herring return DMA_ERROR_CODE; 17470fa478dfSRob Herring } 17480fa478dfSRob Herring 17490fa478dfSRob Herring /** 17504ce63fcdSMarek Szyprowski * arm_iommu_map_page 17514ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 17524ce63fcdSMarek Szyprowski * @page: page that buffer resides in 17534ce63fcdSMarek Szyprowski * @offset: offset into page for start of buffer 17544ce63fcdSMarek Szyprowski * @size: size of buffer to map 17554ce63fcdSMarek Szyprowski * @dir: DMA transfer direction 17564ce63fcdSMarek Szyprowski * 17574ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_map_page() 17584ce63fcdSMarek Szyprowski */ 17594ce63fcdSMarek Szyprowski static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 17604ce63fcdSMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 17614ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 17624ce63fcdSMarek Szyprowski { 17630fa478dfSRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 17644ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 17654ce63fcdSMarek Szyprowski 17660fa478dfSRob Herring return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); 17670fa478dfSRob Herring } 17684ce63fcdSMarek Szyprowski 17690fa478dfSRob Herring /** 17700fa478dfSRob Herring * arm_coherent_iommu_unmap_page 17710fa478dfSRob Herring * @dev: valid struct device pointer 17720fa478dfSRob Herring * @handle: DMA address of buffer 17730fa478dfSRob Herring * @size: size of buffer (same as passed to dma_map_page) 17740fa478dfSRob Herring * @dir: DMA transfer direction (same as passed to dma_map_page) 17750fa478dfSRob Herring * 17760fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_unmap_page() 17770fa478dfSRob Herring */ 17780fa478dfSRob Herring static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, 17790fa478dfSRob Herring size_t size, enum dma_data_direction dir, 17800fa478dfSRob Herring struct dma_attrs *attrs) 17810fa478dfSRob Herring { 17820fa478dfSRob Herring struct dma_iommu_mapping *mapping = dev->archdata.mapping; 17830fa478dfSRob Herring dma_addr_t iova = handle & PAGE_MASK; 17840fa478dfSRob Herring int offset = handle & ~PAGE_MASK; 17850fa478dfSRob Herring int len = PAGE_ALIGN(size + offset); 17864ce63fcdSMarek Szyprowski 17870fa478dfSRob Herring if (!iova) 17880fa478dfSRob Herring return; 17890fa478dfSRob Herring 17900fa478dfSRob Herring iommu_unmap(mapping->domain, iova, len); 17910fa478dfSRob Herring __free_iova(mapping, iova, len); 17924ce63fcdSMarek Szyprowski } 17934ce63fcdSMarek Szyprowski 17944ce63fcdSMarek Szyprowski /** 17954ce63fcdSMarek Szyprowski * arm_iommu_unmap_page 17964ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 17974ce63fcdSMarek Szyprowski * @handle: DMA address of buffer 17984ce63fcdSMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 17994ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 18004ce63fcdSMarek Szyprowski * 18014ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_unmap_page() 18024ce63fcdSMarek Szyprowski */ 18034ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 18044ce63fcdSMarek Szyprowski size_t size, enum dma_data_direction dir, 18054ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 18064ce63fcdSMarek Szyprowski { 18074ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 18084ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 18094ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 18104ce63fcdSMarek Szyprowski int offset = handle & ~PAGE_MASK; 18114ce63fcdSMarek Szyprowski int len = PAGE_ALIGN(size + offset); 18124ce63fcdSMarek Szyprowski 18134ce63fcdSMarek Szyprowski if (!iova) 18144ce63fcdSMarek Szyprowski return; 18154ce63fcdSMarek Szyprowski 18160fa478dfSRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 18174ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 18184ce63fcdSMarek Szyprowski 18194ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, len); 18204ce63fcdSMarek Szyprowski __free_iova(mapping, iova, len); 18214ce63fcdSMarek Szyprowski } 18224ce63fcdSMarek Szyprowski 18234ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev, 18244ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 18254ce63fcdSMarek Szyprowski { 18264ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 18274ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 18284ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 18294ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 18304ce63fcdSMarek Szyprowski 18314ce63fcdSMarek Szyprowski if (!iova) 18324ce63fcdSMarek Szyprowski return; 18334ce63fcdSMarek Szyprowski 18344ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 18354ce63fcdSMarek Szyprowski } 18364ce63fcdSMarek Szyprowski 18374ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev, 18384ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 18394ce63fcdSMarek Szyprowski { 18404ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 18414ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 18424ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 18434ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 18444ce63fcdSMarek Szyprowski 18454ce63fcdSMarek Szyprowski if (!iova) 18464ce63fcdSMarek Szyprowski return; 18474ce63fcdSMarek Szyprowski 18484ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 18494ce63fcdSMarek Szyprowski } 18504ce63fcdSMarek Szyprowski 18514ce63fcdSMarek Szyprowski struct dma_map_ops iommu_ops = { 18524ce63fcdSMarek Szyprowski .alloc = arm_iommu_alloc_attrs, 18534ce63fcdSMarek Szyprowski .free = arm_iommu_free_attrs, 18544ce63fcdSMarek Szyprowski .mmap = arm_iommu_mmap_attrs, 1855dc2832e1SMarek Szyprowski .get_sgtable = arm_iommu_get_sgtable, 18564ce63fcdSMarek Szyprowski 18574ce63fcdSMarek Szyprowski .map_page = arm_iommu_map_page, 18584ce63fcdSMarek Szyprowski .unmap_page = arm_iommu_unmap_page, 18594ce63fcdSMarek Szyprowski .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, 18604ce63fcdSMarek Szyprowski .sync_single_for_device = arm_iommu_sync_single_for_device, 18614ce63fcdSMarek Szyprowski 18624ce63fcdSMarek Szyprowski .map_sg = arm_iommu_map_sg, 18634ce63fcdSMarek Szyprowski .unmap_sg = arm_iommu_unmap_sg, 18644ce63fcdSMarek Szyprowski .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 18654ce63fcdSMarek Szyprowski .sync_sg_for_device = arm_iommu_sync_sg_for_device, 1866d09e1333SHiroshi Doyu 1867d09e1333SHiroshi Doyu .set_dma_mask = arm_dma_set_mask, 18684ce63fcdSMarek Szyprowski }; 18694ce63fcdSMarek Szyprowski 18700fa478dfSRob Herring struct dma_map_ops iommu_coherent_ops = { 18710fa478dfSRob Herring .alloc = arm_iommu_alloc_attrs, 18720fa478dfSRob Herring .free = arm_iommu_free_attrs, 18730fa478dfSRob Herring .mmap = arm_iommu_mmap_attrs, 18740fa478dfSRob Herring .get_sgtable = arm_iommu_get_sgtable, 18750fa478dfSRob Herring 18760fa478dfSRob Herring .map_page = arm_coherent_iommu_map_page, 18770fa478dfSRob Herring .unmap_page = arm_coherent_iommu_unmap_page, 18780fa478dfSRob Herring 18790fa478dfSRob Herring .map_sg = arm_coherent_iommu_map_sg, 18800fa478dfSRob Herring .unmap_sg = arm_coherent_iommu_unmap_sg, 1881d09e1333SHiroshi Doyu 1882d09e1333SHiroshi Doyu .set_dma_mask = arm_dma_set_mask, 18830fa478dfSRob Herring }; 18840fa478dfSRob Herring 18854ce63fcdSMarek Szyprowski /** 18864ce63fcdSMarek Szyprowski * arm_iommu_create_mapping 18874ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls) 18884ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space 18894ce63fcdSMarek Szyprowski * @size: size of the valid IO address space 18904ce63fcdSMarek Szyprowski * @order: accuracy of the IO addresses allocations 18914ce63fcdSMarek Szyprowski * 18924ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused 18934ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and 18944ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions. 18954ce63fcdSMarek Szyprowski * 18964ce63fcdSMarek Szyprowski * The client device need to be attached to the mapping with 18974ce63fcdSMarek Szyprowski * arm_iommu_attach_device function. 18984ce63fcdSMarek Szyprowski */ 18994ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 19004ce63fcdSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, 19014ce63fcdSMarek Szyprowski int order) 19024ce63fcdSMarek Szyprowski { 19034ce63fcdSMarek Szyprowski unsigned int count = size >> (PAGE_SHIFT + order); 19044ce63fcdSMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); 19054ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 19064ce63fcdSMarek Szyprowski int err = -ENOMEM; 19074ce63fcdSMarek Szyprowski 19084ce63fcdSMarek Szyprowski if (!count) 19094ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 19104ce63fcdSMarek Szyprowski 19114ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 19124ce63fcdSMarek Szyprowski if (!mapping) 19134ce63fcdSMarek Szyprowski goto err; 19144ce63fcdSMarek Szyprowski 19154ce63fcdSMarek Szyprowski mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 19164ce63fcdSMarek Szyprowski if (!mapping->bitmap) 19174ce63fcdSMarek Szyprowski goto err2; 19184ce63fcdSMarek Szyprowski 19194ce63fcdSMarek Szyprowski mapping->base = base; 19204ce63fcdSMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 19214ce63fcdSMarek Szyprowski mapping->order = order; 19224ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 19234ce63fcdSMarek Szyprowski 19244ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 19254ce63fcdSMarek Szyprowski if (!mapping->domain) 19264ce63fcdSMarek Szyprowski goto err3; 19274ce63fcdSMarek Szyprowski 19284ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 19294ce63fcdSMarek Szyprowski return mapping; 19304ce63fcdSMarek Szyprowski err3: 19314ce63fcdSMarek Szyprowski kfree(mapping->bitmap); 19324ce63fcdSMarek Szyprowski err2: 19334ce63fcdSMarek Szyprowski kfree(mapping); 19344ce63fcdSMarek Szyprowski err: 19354ce63fcdSMarek Szyprowski return ERR_PTR(err); 19364ce63fcdSMarek Szyprowski } 193718177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); 19384ce63fcdSMarek Szyprowski 19394ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 19404ce63fcdSMarek Szyprowski { 19414ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 19424ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 19434ce63fcdSMarek Szyprowski 19444ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 19454ce63fcdSMarek Szyprowski kfree(mapping->bitmap); 19464ce63fcdSMarek Szyprowski kfree(mapping); 19474ce63fcdSMarek Szyprowski } 19484ce63fcdSMarek Szyprowski 19494ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) 19504ce63fcdSMarek Szyprowski { 19514ce63fcdSMarek Szyprowski if (mapping) 19524ce63fcdSMarek Szyprowski kref_put(&mapping->kref, release_iommu_mapping); 19534ce63fcdSMarek Szyprowski } 195418177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); 19554ce63fcdSMarek Szyprowski 19564ce63fcdSMarek Szyprowski /** 19574ce63fcdSMarek Szyprowski * arm_iommu_attach_device 19584ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 19594ce63fcdSMarek Szyprowski * @mapping: io address space mapping structure (returned from 19604ce63fcdSMarek Szyprowski * arm_iommu_create_mapping) 19614ce63fcdSMarek Szyprowski * 19624ce63fcdSMarek Szyprowski * Attaches specified io address space mapping to the provided device, 19634ce63fcdSMarek Szyprowski * this replaces the dma operations (dma_map_ops pointer) with the 19644ce63fcdSMarek Szyprowski * IOMMU aware version. More than one client might be attached to 19654ce63fcdSMarek Szyprowski * the same io address space mapping. 19664ce63fcdSMarek Szyprowski */ 19674ce63fcdSMarek Szyprowski int arm_iommu_attach_device(struct device *dev, 19684ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping) 19694ce63fcdSMarek Szyprowski { 19704ce63fcdSMarek Szyprowski int err; 19714ce63fcdSMarek Szyprowski 19724ce63fcdSMarek Szyprowski err = iommu_attach_device(mapping->domain, dev); 19734ce63fcdSMarek Szyprowski if (err) 19744ce63fcdSMarek Szyprowski return err; 19754ce63fcdSMarek Szyprowski 19764ce63fcdSMarek Szyprowski kref_get(&mapping->kref); 19774ce63fcdSMarek Szyprowski dev->archdata.mapping = mapping; 19784ce63fcdSMarek Szyprowski set_dma_ops(dev, &iommu_ops); 19794ce63fcdSMarek Szyprowski 198075c59716SHiroshi Doyu pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); 19814ce63fcdSMarek Szyprowski return 0; 19824ce63fcdSMarek Szyprowski } 198318177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 19844ce63fcdSMarek Szyprowski 19856fe36758SHiroshi Doyu /** 19866fe36758SHiroshi Doyu * arm_iommu_detach_device 19876fe36758SHiroshi Doyu * @dev: valid struct device pointer 19886fe36758SHiroshi Doyu * 19896fe36758SHiroshi Doyu * Detaches the provided device from a previously attached map. 19906fe36758SHiroshi Doyu * This voids the dma operations (dma_map_ops pointer) 19916fe36758SHiroshi Doyu */ 19926fe36758SHiroshi Doyu void arm_iommu_detach_device(struct device *dev) 19936fe36758SHiroshi Doyu { 19946fe36758SHiroshi Doyu struct dma_iommu_mapping *mapping; 19956fe36758SHiroshi Doyu 19966fe36758SHiroshi Doyu mapping = to_dma_iommu_mapping(dev); 19976fe36758SHiroshi Doyu if (!mapping) { 19986fe36758SHiroshi Doyu dev_warn(dev, "Not attached\n"); 19996fe36758SHiroshi Doyu return; 20006fe36758SHiroshi Doyu } 20016fe36758SHiroshi Doyu 20026fe36758SHiroshi Doyu iommu_detach_device(mapping->domain, dev); 20036fe36758SHiroshi Doyu kref_put(&mapping->kref, release_iommu_mapping); 20049e4b259dSWill Deacon dev->archdata.mapping = NULL; 20056fe36758SHiroshi Doyu set_dma_ops(dev, NULL); 20066fe36758SHiroshi Doyu 20076fe36758SHiroshi Doyu pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 20086fe36758SHiroshi Doyu } 200918177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 20106fe36758SHiroshi Doyu 20114ce63fcdSMarek Szyprowski #endif 2012