/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	if (!arch_is_coherent())
		__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	if (!arch_is_coherent())
		__dma_page_cpu_to_dev(page, offset, size, dir);
}

static int arm_dma_set_mask(struct device *dev, u64 dma_mask);

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
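/*
 * Illustration (not part of the original file): a driver reaches the
 * arm_dma_ops callbacks above through the generic dma_map_page()/
 * dma_unmap_page() wrappers.  A minimal sketch, assuming a hypothetical
 * device 'dev' that receives one page of data:
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... program the device with 'dma' and wait for completion ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 *
 * Between map and unmap the buffer is owned by the device; the CPU must not
 * touch it except via dma_sync_single_for_cpu()/dma_sync_single_for_device().
 */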
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & (u64)arm_dma_limit) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (u64)arm_dma_limit);
			return 0;
		}
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	void *ptr;
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	if (ptr) {
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}

struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page *page;
};

static struct dma_pool atomic_pool = {
	.size = SZ_256K,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	if (IS_ENABLED(CONFIG_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
					   &page, NULL);
	if (ptr) {
		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->page = page;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
			(unsigned)pool->size / 1024);
		return 0;
	}
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
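/*
 * Note (illustrative, not from the original source): the pool initialised
 * above backs GFP_ATOMIC coherent allocations, since the remap and CMA
 * paths may sleep.  Its SZ_256K default can be overridden on the kernel
 * command line via the early_param() registered above, e.g.:
 *
 *	coherent_pool=1M
 *
 * memparse() accepts the usual K/M/G size suffixes here.
 */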
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size. This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->page + pageno;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}

static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (start < pool->vaddr || start > pool->vaddr + pool->size)
		return 0;

	if (start + size > pool->vaddr + pool->size) {
		WARN(1, "freeing wrong coherent size from pool\n");
		return 0;
	}

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);
	__dma_remap(page, size, prot);

	*ret_page = page;
	return page_address(page);
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   size_t size)
{
	__dma_remap(page, size, pgprot_kernel);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)	__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret)		NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, size)			do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}



static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (arch_is_coherent() || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (gfp & GFP_ATOMIC)
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot,
			   __builtin_return_address(0));
}
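/*
 * Summary (illustrative, not from the original source) of how __dma_alloc()
 * above selects a backend:
 *
 *	coherent arch or !MMU   ->  __alloc_simple_buffer()    (lowmem pages as-is)
 *	GFP_ATOMIC              ->  __alloc_from_pool()        (preallocated atomic pool)
 *	CONFIG_CMA disabled     ->  __alloc_remap_buffer()     (buddy pages + vmalloc remap)
 *	otherwise               ->  __alloc_from_contiguous()  (CMA region, remapped in place)
 *
 * A driver would normally go through the generic wrapper rather than calling
 * arm_dma_alloc() directly, e.g.:
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);
 */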
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}

/*
 * Free a buffer as defined by the above mapping.
 */
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (arch_is_coherent() || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_CMA)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, size);
	}
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;

	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
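/*
 * Illustration (not part of the original file): typical scatter-gather usage
 * from a driver, assuming an already initialised sg list 'sgl' with 'nents'
 * entries:
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * program_hw_descriptor() is a stand-in for device-specific code.  Note that
 * dma_unmap_sg() takes the original 'nents', not the count returned by
 * dma_map_sg().
 */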
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask < (u64)arm_dma_limit)
		return 0;
	return 1;
}
EXPORT_SYMBOL(dma_supported);

static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	unsigned long flags;

	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << mapping->order) - 1) >> mapping->order;

	if (order > mapping->order)
		align = (1 << (order - mapping->order)) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
					   count, align);
	if (start > mapping->bits) {
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);

	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start = (addr - mapping->base) >>
			     (mapping->order + PAGE_SHIFT);
	unsigned int count = ((size >> PAGE_SHIFT) +
			      (1 << mapping->order) - 1) >> mapping->order;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
		if (!pages[i])
			goto error;

		if (order)
			split_page(pages[i], order);
		j = 1 << order;
		while (--j)
			pages[i + j] = pages[i] + j;

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
{
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i;
	for (i = 0; i < count; i++)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return 0;
}

/*
 * Create a CPU mapping for a specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;

	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;

	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}
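/*
 * Worked example (illustrative, not from the original source) of the IOVA
 * allocator above: with mapping->order == 0, a 20 KiB request needs
 * count = 5 bitmap bits, and get_order(20K) == 3, so the bitmap search is
 * aligned to an 8-page (32 KiB) boundary via align = (1 << 3) - 1.  The
 * returned address is then mapping->base + (start << PAGE_SHIFT), and
 * __iommu_create_mapping() merges runs of physically contiguous pages into
 * single iommu_map() calls within that IOVA range.
 */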
Szyprowski { 10794ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 10804ce63fcdSMarek Szyprowski 10814ce63fcdSMarek Szyprowski /* 10824ce63fcdSMarek Szyprowski * add optional in-page offset from iova to size and align 10834ce63fcdSMarek Szyprowski * result to page size 10844ce63fcdSMarek Szyprowski */ 10854ce63fcdSMarek Szyprowski size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); 10864ce63fcdSMarek Szyprowski iova &= PAGE_MASK; 10874ce63fcdSMarek Szyprowski 10884ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, size); 10894ce63fcdSMarek Szyprowski __free_iova(mapping, iova, size); 10904ce63fcdSMarek Szyprowski return 0; 10914ce63fcdSMarek Szyprowski } 10924ce63fcdSMarek Szyprowski 1093955c757eSMarek Szyprowski static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) 1094e9da6e99SMarek Szyprowski { 1095e9da6e99SMarek Szyprowski struct vm_struct *area; 1096e9da6e99SMarek Szyprowski 1097955c757eSMarek Szyprowski if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1098955c757eSMarek Szyprowski return cpu_addr; 1099955c757eSMarek Szyprowski 1100e9da6e99SMarek Szyprowski area = find_vm_area(cpu_addr); 1101e9da6e99SMarek Szyprowski if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) 1102e9da6e99SMarek Szyprowski return area->pages; 1103e9da6e99SMarek Szyprowski return NULL; 1104e9da6e99SMarek Szyprowski } 1105e9da6e99SMarek Szyprowski 11064ce63fcdSMarek Szyprowski static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, 11074ce63fcdSMarek Szyprowski dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 11084ce63fcdSMarek Szyprowski { 11094ce63fcdSMarek Szyprowski pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); 11104ce63fcdSMarek Szyprowski struct page **pages; 11114ce63fcdSMarek Szyprowski void *addr = NULL; 11124ce63fcdSMarek Szyprowski 11134ce63fcdSMarek Szyprowski *handle = DMA_ERROR_CODE; 11144ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 11154ce63fcdSMarek Szyprowski 11164ce63fcdSMarek Szyprowski pages = __iommu_alloc_buffer(dev, size, gfp); 11174ce63fcdSMarek Szyprowski if (!pages) 11184ce63fcdSMarek Szyprowski return NULL; 11194ce63fcdSMarek Szyprowski 11204ce63fcdSMarek Szyprowski *handle = __iommu_create_mapping(dev, pages, size); 11214ce63fcdSMarek Szyprowski if (*handle == DMA_ERROR_CODE) 11224ce63fcdSMarek Szyprowski goto err_buffer; 11234ce63fcdSMarek Szyprowski 1124955c757eSMarek Szyprowski if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1125955c757eSMarek Szyprowski return pages; 1126955c757eSMarek Szyprowski 1127e9da6e99SMarek Szyprowski addr = __iommu_alloc_remap(pages, size, gfp, prot, 1128e9da6e99SMarek Szyprowski __builtin_return_address(0)); 11294ce63fcdSMarek Szyprowski if (!addr) 11304ce63fcdSMarek Szyprowski goto err_mapping; 11314ce63fcdSMarek Szyprowski 11324ce63fcdSMarek Szyprowski return addr; 11334ce63fcdSMarek Szyprowski 11344ce63fcdSMarek Szyprowski err_mapping: 11354ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, *handle, size); 11364ce63fcdSMarek Szyprowski err_buffer: 11374ce63fcdSMarek Szyprowski __iommu_free_buffer(dev, pages, size); 11384ce63fcdSMarek Szyprowski return NULL; 11394ce63fcdSMarek Szyprowski } 11404ce63fcdSMarek Szyprowski 11414ce63fcdSMarek Szyprowski static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 11424ce63fcdSMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 11434ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 11444ce63fcdSMarek Szyprowski { 11454ce63fcdSMarek Szyprowski unsigned long uaddr = 
vma->vm_start; 11464ce63fcdSMarek Szyprowski unsigned long usize = vma->vm_end - vma->vm_start; 1147955c757eSMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1148e9da6e99SMarek Szyprowski 1149e9da6e99SMarek Szyprowski vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 1150e9da6e99SMarek Szyprowski 1151e9da6e99SMarek Szyprowski if (!pages) 1152e9da6e99SMarek Szyprowski return -ENXIO; 11534ce63fcdSMarek Szyprowski 11544ce63fcdSMarek Szyprowski do { 1155e9da6e99SMarek Szyprowski int ret = vm_insert_page(vma, uaddr, *pages++); 11564ce63fcdSMarek Szyprowski if (ret) { 1157e9da6e99SMarek Szyprowski pr_err("Remapping memory failed: %d\n", ret); 11584ce63fcdSMarek Szyprowski return ret; 11594ce63fcdSMarek Szyprowski } 11604ce63fcdSMarek Szyprowski uaddr += PAGE_SIZE; 11614ce63fcdSMarek Szyprowski usize -= PAGE_SIZE; 11624ce63fcdSMarek Szyprowski } while (usize > 0); 1163e9da6e99SMarek Szyprowski 11644ce63fcdSMarek Szyprowski return 0; 11654ce63fcdSMarek Szyprowski } 11664ce63fcdSMarek Szyprowski 11674ce63fcdSMarek Szyprowski /* 11684ce63fcdSMarek Szyprowski * free a page as defined by the above mapping. 11694ce63fcdSMarek Szyprowski * Must not be called with IRQs disabled. 11704ce63fcdSMarek Szyprowski */ 11714ce63fcdSMarek Szyprowski void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 11724ce63fcdSMarek Szyprowski dma_addr_t handle, struct dma_attrs *attrs) 11734ce63fcdSMarek Szyprowski { 1174955c757eSMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 11754ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 11764ce63fcdSMarek Szyprowski 1177e9da6e99SMarek Szyprowski if (!pages) { 1178e9da6e99SMarek Szyprowski WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 1179e9da6e99SMarek Szyprowski return; 1180e9da6e99SMarek Szyprowski } 1181e9da6e99SMarek Szyprowski 1182955c757eSMarek Szyprowski if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { 1183e9da6e99SMarek Szyprowski unmap_kernel_range((unsigned long)cpu_addr, size); 1184e9da6e99SMarek Szyprowski vunmap(cpu_addr); 1185955c757eSMarek Szyprowski } 1186e9da6e99SMarek Szyprowski 11874ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, handle, size); 11884ce63fcdSMarek Szyprowski __iommu_free_buffer(dev, pages, size); 11894ce63fcdSMarek Szyprowski } 11904ce63fcdSMarek Szyprowski 1191dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, 1192dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, 1193dc2832e1SMarek Szyprowski size_t size, struct dma_attrs *attrs) 1194dc2832e1SMarek Szyprowski { 1195dc2832e1SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1196dc2832e1SMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1197dc2832e1SMarek Szyprowski 1198dc2832e1SMarek Szyprowski if (!pages) 1199dc2832e1SMarek Szyprowski return -ENXIO; 1200dc2832e1SMarek Szyprowski 1201dc2832e1SMarek Szyprowski return sg_alloc_table_from_pages(sgt, pages, count, 0, size, 1202dc2832e1SMarek Szyprowski GFP_KERNEL); 12034ce63fcdSMarek Szyprowski } 12044ce63fcdSMarek Szyprowski 12054ce63fcdSMarek Szyprowski /* 12064ce63fcdSMarek Szyprowski * Map a part of the scatter-gather list into contiguous io address space 12074ce63fcdSMarek Szyprowski */ 12084ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 12094ce63fcdSMarek Szyprowski size_t size, dma_addr_t *handle, 121097ef952aSMarek Szyprowski enum dma_data_direction dir, struct dma_attrs 
*attrs) 12114ce63fcdSMarek Szyprowski { 12124ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 12134ce63fcdSMarek Szyprowski dma_addr_t iova, iova_base; 12144ce63fcdSMarek Szyprowski int ret = 0; 12154ce63fcdSMarek Szyprowski unsigned int count; 12164ce63fcdSMarek Szyprowski struct scatterlist *s; 12174ce63fcdSMarek Szyprowski 12184ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 12194ce63fcdSMarek Szyprowski *handle = DMA_ERROR_CODE; 12204ce63fcdSMarek Szyprowski 12214ce63fcdSMarek Szyprowski iova_base = iova = __alloc_iova(mapping, size); 12224ce63fcdSMarek Szyprowski if (iova == DMA_ERROR_CODE) 12234ce63fcdSMarek Szyprowski return -ENOMEM; 12244ce63fcdSMarek Szyprowski 12254ce63fcdSMarek Szyprowski for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 12264ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(sg_page(s)); 12274ce63fcdSMarek Szyprowski unsigned int len = PAGE_ALIGN(s->offset + s->length); 12284ce63fcdSMarek Szyprowski 122997ef952aSMarek Szyprowski if (!arch_is_coherent() && 123097ef952aSMarek Szyprowski !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 12314ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 12324ce63fcdSMarek Szyprowski 12334ce63fcdSMarek Szyprowski ret = iommu_map(mapping->domain, iova, phys, len, 0); 12344ce63fcdSMarek Szyprowski if (ret < 0) 12354ce63fcdSMarek Szyprowski goto fail; 12364ce63fcdSMarek Szyprowski count += len >> PAGE_SHIFT; 12374ce63fcdSMarek Szyprowski iova += len; 12384ce63fcdSMarek Szyprowski } 12394ce63fcdSMarek Szyprowski *handle = iova_base; 12404ce63fcdSMarek Szyprowski 12414ce63fcdSMarek Szyprowski return 0; 12424ce63fcdSMarek Szyprowski fail: 12434ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); 12444ce63fcdSMarek Szyprowski __free_iova(mapping, iova_base, size); 12454ce63fcdSMarek Szyprowski return ret; 12464ce63fcdSMarek Szyprowski } 12474ce63fcdSMarek Szyprowski 12484ce63fcdSMarek Szyprowski /** 12494ce63fcdSMarek Szyprowski * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA 12504ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 12514ce63fcdSMarek Szyprowski * @sg: list of buffers 12524ce63fcdSMarek Szyprowski * @nents: number of buffers to map 12534ce63fcdSMarek Szyprowski * @dir: DMA transfer direction 12544ce63fcdSMarek Szyprowski * 12554ce63fcdSMarek Szyprowski * Map a set of buffers described by scatterlist in streaming mode for DMA. 12564ce63fcdSMarek Szyprowski * The scatter gather list elements are merged together (if possible) and 12574ce63fcdSMarek Szyprowski * tagged with the appropriate dma address and length. They are obtained via 12584ce63fcdSMarek Szyprowski * sg_dma_{address,length}. 
12594ce63fcdSMarek Szyprowski */ 12604ce63fcdSMarek Szyprowski int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, 12614ce63fcdSMarek Szyprowski enum dma_data_direction dir, struct dma_attrs *attrs) 12624ce63fcdSMarek Szyprowski { 12634ce63fcdSMarek Szyprowski struct scatterlist *s = sg, *dma = sg, *start = sg; 12644ce63fcdSMarek Szyprowski int i, count = 0; 12654ce63fcdSMarek Szyprowski unsigned int offset = s->offset; 12664ce63fcdSMarek Szyprowski unsigned int size = s->offset + s->length; 12674ce63fcdSMarek Szyprowski unsigned int max = dma_get_max_seg_size(dev); 12684ce63fcdSMarek Szyprowski 12694ce63fcdSMarek Szyprowski for (i = 1; i < nents; i++) { 12704ce63fcdSMarek Szyprowski s = sg_next(s); 12714ce63fcdSMarek Szyprowski 12724ce63fcdSMarek Szyprowski s->dma_address = DMA_ERROR_CODE; 12734ce63fcdSMarek Szyprowski s->dma_length = 0; 12744ce63fcdSMarek Szyprowski 12754ce63fcdSMarek Szyprowski if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 12764ce63fcdSMarek Szyprowski if (__map_sg_chunk(dev, start, size, &dma->dma_address, 127797ef952aSMarek Szyprowski dir, attrs) < 0) 12784ce63fcdSMarek Szyprowski goto bad_mapping; 12794ce63fcdSMarek Szyprowski 12804ce63fcdSMarek Szyprowski dma->dma_address += offset; 12814ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 12824ce63fcdSMarek Szyprowski 12834ce63fcdSMarek Szyprowski size = offset = s->offset; 12844ce63fcdSMarek Szyprowski start = s; 12854ce63fcdSMarek Szyprowski dma = sg_next(dma); 12864ce63fcdSMarek Szyprowski count += 1; 12874ce63fcdSMarek Szyprowski } 12884ce63fcdSMarek Szyprowski size += s->length; 12894ce63fcdSMarek Szyprowski } 129097ef952aSMarek Szyprowski if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0) 12914ce63fcdSMarek Szyprowski goto bad_mapping; 12924ce63fcdSMarek Szyprowski 12934ce63fcdSMarek Szyprowski dma->dma_address += offset; 12944ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 12954ce63fcdSMarek Szyprowski 12964ce63fcdSMarek Szyprowski return count+1; 12974ce63fcdSMarek Szyprowski 12984ce63fcdSMarek Szyprowski bad_mapping: 12994ce63fcdSMarek Szyprowski for_each_sg(sg, s, count, i) 13004ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 13014ce63fcdSMarek Szyprowski return 0; 13024ce63fcdSMarek Szyprowski } 13034ce63fcdSMarek Szyprowski 13044ce63fcdSMarek Szyprowski /** 13054ce63fcdSMarek Szyprowski * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 13064ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 13074ce63fcdSMarek Szyprowski * @sg: list of buffers 13084ce63fcdSMarek Szyprowski * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 13094ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 13104ce63fcdSMarek Szyprowski * 13114ce63fcdSMarek Szyprowski * Unmap a set of streaming mode DMA translations. Again, CPU access 13124ce63fcdSMarek Szyprowski * rules concerning calls here are the same as for dma_unmap_single(). 
13134ce63fcdSMarek Szyprowski */ 13144ce63fcdSMarek Szyprowski void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 13154ce63fcdSMarek Szyprowski enum dma_data_direction dir, struct dma_attrs *attrs) 13164ce63fcdSMarek Szyprowski { 13174ce63fcdSMarek Szyprowski struct scatterlist *s; 13184ce63fcdSMarek Szyprowski int i; 13194ce63fcdSMarek Szyprowski 13204ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) { 13214ce63fcdSMarek Szyprowski if (sg_dma_len(s)) 13224ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, sg_dma_address(s), 13234ce63fcdSMarek Szyprowski sg_dma_len(s)); 132497ef952aSMarek Szyprowski if (!arch_is_coherent() && 132597ef952aSMarek Szyprowski !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 13264ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(sg_page(s), s->offset, 13274ce63fcdSMarek Szyprowski s->length, dir); 13284ce63fcdSMarek Szyprowski } 13294ce63fcdSMarek Szyprowski } 13304ce63fcdSMarek Szyprowski 13314ce63fcdSMarek Szyprowski /** 13324ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_cpu 13334ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 13344ce63fcdSMarek Szyprowski * @sg: list of buffers 13354ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 13364ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 13374ce63fcdSMarek Szyprowski */ 13384ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 13394ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 13404ce63fcdSMarek Szyprowski { 13414ce63fcdSMarek Szyprowski struct scatterlist *s; 13424ce63fcdSMarek Szyprowski int i; 13434ce63fcdSMarek Szyprowski 13444ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 13454ce63fcdSMarek Szyprowski if (!arch_is_coherent()) 13464ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); 13474ce63fcdSMarek Szyprowski 13484ce63fcdSMarek Szyprowski } 13494ce63fcdSMarek Szyprowski 13504ce63fcdSMarek Szyprowski /** 13514ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_device 13524ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 13534ce63fcdSMarek Szyprowski * @sg: list of buffers 13544ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 13554ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 13564ce63fcdSMarek Szyprowski */ 13574ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 13584ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 13594ce63fcdSMarek Szyprowski { 13604ce63fcdSMarek Szyprowski struct scatterlist *s; 13614ce63fcdSMarek Szyprowski int i; 13624ce63fcdSMarek Szyprowski 13634ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 13644ce63fcdSMarek Szyprowski if (!arch_is_coherent()) 13654ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 13664ce63fcdSMarek Szyprowski } 13674ce63fcdSMarek Szyprowski 13684ce63fcdSMarek Szyprowski 13694ce63fcdSMarek Szyprowski /** 13704ce63fcdSMarek Szyprowski * arm_iommu_map_page 13714ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 13724ce63fcdSMarek Szyprowski * @page: page that buffer resides in 13734ce63fcdSMarek Szyprowski * @offset: offset into page for start of buffer 13744ce63fcdSMarek Szyprowski * @size: size of buffer to map 13754ce63fcdSMarek Szyprowski * @dir: DMA transfer direction 13764ce63fcdSMarek Szyprowski * 13774ce63fcdSMarek 
Szyprowski * IOMMU aware version of arm_dma_map_page() 13784ce63fcdSMarek Szyprowski */ 13794ce63fcdSMarek Szyprowski static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 13804ce63fcdSMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 13814ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 13824ce63fcdSMarek Szyprowski { 13834ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 13844ce63fcdSMarek Szyprowski dma_addr_t dma_addr; 13854ce63fcdSMarek Szyprowski int ret, len = PAGE_ALIGN(size + offset); 13864ce63fcdSMarek Szyprowski 138797ef952aSMarek Szyprowski if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 13884ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 13894ce63fcdSMarek Szyprowski 13904ce63fcdSMarek Szyprowski dma_addr = __alloc_iova(mapping, len); 13914ce63fcdSMarek Szyprowski if (dma_addr == DMA_ERROR_CODE) 13924ce63fcdSMarek Szyprowski return dma_addr; 13934ce63fcdSMarek Szyprowski 13944ce63fcdSMarek Szyprowski ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0); 13954ce63fcdSMarek Szyprowski if (ret < 0) 13964ce63fcdSMarek Szyprowski goto fail; 13974ce63fcdSMarek Szyprowski 13984ce63fcdSMarek Szyprowski return dma_addr + offset; 13994ce63fcdSMarek Szyprowski fail: 14004ce63fcdSMarek Szyprowski __free_iova(mapping, dma_addr, len); 14014ce63fcdSMarek Szyprowski return DMA_ERROR_CODE; 14024ce63fcdSMarek Szyprowski } 14034ce63fcdSMarek Szyprowski 14044ce63fcdSMarek Szyprowski /** 14054ce63fcdSMarek Szyprowski * arm_iommu_unmap_page 14064ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 14074ce63fcdSMarek Szyprowski * @handle: DMA address of buffer 14084ce63fcdSMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 14094ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 14104ce63fcdSMarek Szyprowski * 14114ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_unmap_page() 14124ce63fcdSMarek Szyprowski */ 14134ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 14144ce63fcdSMarek Szyprowski size_t size, enum dma_data_direction dir, 14154ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 14164ce63fcdSMarek Szyprowski { 14174ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 14184ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 14194ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 14204ce63fcdSMarek Szyprowski int offset = handle & ~PAGE_MASK; 14214ce63fcdSMarek Szyprowski int len = PAGE_ALIGN(size + offset); 14224ce63fcdSMarek Szyprowski 14234ce63fcdSMarek Szyprowski if (!iova) 14244ce63fcdSMarek Szyprowski return; 14254ce63fcdSMarek Szyprowski 142697ef952aSMarek Szyprowski if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 14274ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 14284ce63fcdSMarek Szyprowski 14294ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, len); 14304ce63fcdSMarek Szyprowski __free_iova(mapping, iova, len); 14314ce63fcdSMarek Szyprowski } 14324ce63fcdSMarek Szyprowski 14334ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev, 14344ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 14354ce63fcdSMarek Szyprowski { 14364ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 
dev->archdata.mapping; 14374ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 14384ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 14394ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 14404ce63fcdSMarek Szyprowski 14414ce63fcdSMarek Szyprowski if (!iova) 14424ce63fcdSMarek Szyprowski return; 14434ce63fcdSMarek Szyprowski 14444ce63fcdSMarek Szyprowski if (!arch_is_coherent()) 14454ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 14464ce63fcdSMarek Szyprowski } 14474ce63fcdSMarek Szyprowski 14484ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev, 14494ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 14504ce63fcdSMarek Szyprowski { 14514ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 14524ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 14534ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 14544ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 14554ce63fcdSMarek Szyprowski 14564ce63fcdSMarek Szyprowski if (!iova) 14574ce63fcdSMarek Szyprowski return; 14584ce63fcdSMarek Szyprowski 14594ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 14604ce63fcdSMarek Szyprowski } 14614ce63fcdSMarek Szyprowski 14624ce63fcdSMarek Szyprowski struct dma_map_ops iommu_ops = { 14634ce63fcdSMarek Szyprowski .alloc = arm_iommu_alloc_attrs, 14644ce63fcdSMarek Szyprowski .free = arm_iommu_free_attrs, 14654ce63fcdSMarek Szyprowski .mmap = arm_iommu_mmap_attrs, 1466dc2832e1SMarek Szyprowski .get_sgtable = arm_iommu_get_sgtable, 14674ce63fcdSMarek Szyprowski 14684ce63fcdSMarek Szyprowski .map_page = arm_iommu_map_page, 14694ce63fcdSMarek Szyprowski .unmap_page = arm_iommu_unmap_page, 14704ce63fcdSMarek Szyprowski .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, 14714ce63fcdSMarek Szyprowski .sync_single_for_device = arm_iommu_sync_single_for_device, 14724ce63fcdSMarek Szyprowski 14734ce63fcdSMarek Szyprowski .map_sg = arm_iommu_map_sg, 14744ce63fcdSMarek Szyprowski .unmap_sg = arm_iommu_unmap_sg, 14754ce63fcdSMarek Szyprowski .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 14764ce63fcdSMarek Szyprowski .sync_sg_for_device = arm_iommu_sync_sg_for_device, 14774ce63fcdSMarek Szyprowski }; 14784ce63fcdSMarek Szyprowski 14794ce63fcdSMarek Szyprowski /** 14804ce63fcdSMarek Szyprowski * arm_iommu_create_mapping 14814ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls) 14824ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space 14834ce63fcdSMarek Szyprowski * @size: size of the valid IO address space 14844ce63fcdSMarek Szyprowski * @order: accuracy of the IO address allocations 14854ce63fcdSMarek Szyprowski * 14864ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused 14874ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and 14884ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions. 14894ce63fcdSMarek Szyprowski * 14904ce63fcdSMarek Szyprowski * The client device needs to be attached to the mapping with 14914ce63fcdSMarek Szyprowski * the arm_iommu_attach_device() function.
14924ce63fcdSMarek Szyprowski */ 14934ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 14944ce63fcdSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, 14954ce63fcdSMarek Szyprowski int order) 14964ce63fcdSMarek Szyprowski { 14974ce63fcdSMarek Szyprowski unsigned int count = size >> (PAGE_SHIFT + order); 14984ce63fcdSMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); 14994ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 15004ce63fcdSMarek Szyprowski int err = -ENOMEM; 15014ce63fcdSMarek Szyprowski 15024ce63fcdSMarek Szyprowski if (!count) 15034ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 15044ce63fcdSMarek Szyprowski 15054ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 15064ce63fcdSMarek Szyprowski if (!mapping) 15074ce63fcdSMarek Szyprowski goto err; 15084ce63fcdSMarek Szyprowski 15094ce63fcdSMarek Szyprowski mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 15104ce63fcdSMarek Szyprowski if (!mapping->bitmap) 15114ce63fcdSMarek Szyprowski goto err2; 15124ce63fcdSMarek Szyprowski 15134ce63fcdSMarek Szyprowski mapping->base = base; 15144ce63fcdSMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 15154ce63fcdSMarek Szyprowski mapping->order = order; 15164ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 15174ce63fcdSMarek Szyprowski 15184ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 15194ce63fcdSMarek Szyprowski if (!mapping->domain) 15204ce63fcdSMarek Szyprowski goto err3; 15214ce63fcdSMarek Szyprowski 15224ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 15234ce63fcdSMarek Szyprowski return mapping; 15244ce63fcdSMarek Szyprowski err3: 15254ce63fcdSMarek Szyprowski kfree(mapping->bitmap); 15264ce63fcdSMarek Szyprowski err2: 15274ce63fcdSMarek Szyprowski kfree(mapping); 15284ce63fcdSMarek Szyprowski err: 15294ce63fcdSMarek Szyprowski return ERR_PTR(err); 15304ce63fcdSMarek Szyprowski } 15314ce63fcdSMarek Szyprowski 15324ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 15334ce63fcdSMarek Szyprowski { 15344ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 15354ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 15364ce63fcdSMarek Szyprowski 15374ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 15384ce63fcdSMarek Szyprowski kfree(mapping->bitmap); 15394ce63fcdSMarek Szyprowski kfree(mapping); 15404ce63fcdSMarek Szyprowski } 15414ce63fcdSMarek Szyprowski 15424ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) 15434ce63fcdSMarek Szyprowski { 15444ce63fcdSMarek Szyprowski if (mapping) 15454ce63fcdSMarek Szyprowski kref_put(&mapping->kref, release_iommu_mapping); 15464ce63fcdSMarek Szyprowski } 15474ce63fcdSMarek Szyprowski 15484ce63fcdSMarek Szyprowski /** 15494ce63fcdSMarek Szyprowski * arm_iommu_attach_device 15504ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 15514ce63fcdSMarek Szyprowski * @mapping: io address space mapping structure (returned from 15524ce63fcdSMarek Szyprowski * arm_iommu_create_mapping) 15534ce63fcdSMarek Szyprowski * 15544ce63fcdSMarek Szyprowski * Attaches the specified io address space mapping to the provided device; 15554ce63fcdSMarek Szyprowski * this replaces the dma operations (dma_map_ops pointer) with the 15564ce63fcdSMarek Szyprowski * IOMMU aware version. More than one client might be attached to 15574ce63fcdSMarek Szyprowski * the same io address space mapping.
15584ce63fcdSMarek Szyprowski */ 15594ce63fcdSMarek Szyprowski int arm_iommu_attach_device(struct device *dev, 15604ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping) 15614ce63fcdSMarek Szyprowski { 15624ce63fcdSMarek Szyprowski int err; 15634ce63fcdSMarek Szyprowski 15644ce63fcdSMarek Szyprowski err = iommu_attach_device(mapping->domain, dev); 15654ce63fcdSMarek Szyprowski if (err) 15664ce63fcdSMarek Szyprowski return err; 15674ce63fcdSMarek Szyprowski 15684ce63fcdSMarek Szyprowski kref_get(&mapping->kref); 15694ce63fcdSMarek Szyprowski dev->archdata.mapping = mapping; 15704ce63fcdSMarek Szyprowski set_dma_ops(dev, &iommu_ops); 15714ce63fcdSMarek Szyprowski 15724ce63fcdSMarek Szyprowski pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev)); 15734ce63fcdSMarek Szyprowski return 0; 15744ce63fcdSMarek Szyprowski } 15754ce63fcdSMarek Szyprowski 15764ce63fcdSMarek Szyprowski #endif 1577
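/*
 * Editorial usage sketch, not part of the original file: how a bus or
 * platform driver is expected to hook a device up to the IOMMU-aware
 * dma_map_ops defined above.  The bus type, IO window base (0x80000000)
 * and window size (SZ_128M) are illustrative assumptions, not values
 * taken from this file.
 *
 *	struct dma_iommu_mapping *mapping;
 *	int err;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M, 0);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	err = arm_iommu_attach_device(dev, mapping);
 *	if (err) {
 *		arm_iommu_release_mapping(mapping);
 *		return err;
 *	}
 *
 * After a successful attach, set_dma_ops() has pointed the device at
 * iommu_ops, so ordinary dma_alloc_coherent()/dma_map_*() calls are
 * serviced by the arm_iommu_* implementations above; passing
 * DMA_ATTR_NO_KERNEL_MAPPING to dma_alloc_attrs() skips the kernel
 * remap step performed by __iommu_alloc_remap().
 */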
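/*
 * Editorial sketch of the streaming scatter-gather path, under the same
 * assumptions as the sketch above (the device has been attached to an
 * IOMMU mapping).  NENTS and program_hw_descriptor() are hypothetical
 * driver details used only for illustration.
 *
 *	struct scatterlist sgl[NENTS];
 *	struct scatterlist *s;
 *	int i, mapped;
 *
 *	sg_init_table(sgl, NENTS);
 *	// fill each entry with sg_set_page() or sg_set_buf() ...
 *
 *	mapped = dma_map_sg(dev, sgl, NENTS, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *
 *	for_each_sg(sgl, s, mapped, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *
 *	// ... run the transfer, then release the IO mappings ...
 *	dma_unmap_sg(dev, sgl, NENTS, DMA_TO_DEVICE);
 *
 * dma_map_sg() lands in arm_iommu_map_sg(), which merges physically
 * discontiguous entries into as few IO-contiguous chunks as the device's
 * segment size limit allows; only the returned 'mapped' count of merged
 * segments is walked, while the original NENTS is passed back to
 * dma_unmap_sg().
 */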