/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * the Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address, which is 0x8000_0000 based.
 */

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>


static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache,
	 * thus allocate normal cached memory.
	 *
	 * The gains with IOC are two-pronged:
	 *   -For streaming data, it elides the need for cache maintenance,
	 *    saving cycles in flush code and bus bandwidth, as otherwise all
	 *    the lines of a buffer would need to be flushed out to memory
	 *   -For coherent data, Reads/Writes to buffers terminate early in
	 *    the cache (vs. always going to memory), and are thus faster
	 */
	if ((is_isa_arcv2() && ioc_enable) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;

	/*
	 * - A coherent buffer needs an MMU mapping to enforce non-cacheability
	 * - A highmem page needs a virtual handle (hence an MMU mapping),
	 *   independent of cacheability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is the linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/* This is the kernel virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets the kvaddr,
	 * hence can't be used to efficiently flush the L1 and/or L2 caches,
	 * which need the paddr.
	 * Currently flush_cache_vmap() nukes the L1 cache completely; that
	 * will be optimized in a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);
	int is_non_coh = 1;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_enable);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(dma_addr);
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	dma_cache_wback(dma_handle, size);
}

static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	dma_cache_inv(dma_handle, size);
}

/*
 * arc_dma_map_page - map a portion of a page for streaming DMA
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 *
 * Note: while it takes a struct page as arg, the caller can "abuse" it to
 * pass a region larger than PAGE_SIZE, provided it is physically contiguous,
 * and this still works correctly.
 */
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arc_dma_sync_single_for_device(dev, paddr, size, dir);

	return paddr;
}

/*
 * arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 *
 * Note: historically this routine was not implemented for ARC
 */
static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	phys_addr_t paddr = handle;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arc_dma_sync_single_for_cpu(dev, paddr, size, dir);
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir,
			     unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		arc_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
				   attrs);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		arc_dma_sync_single_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		arc_dma_sync_single_for_device(dev, sg_phys(sg), sg->length,
					       dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

const struct dma_map_ops arc_dma_ops = {
	.alloc			= arc_dma_alloc,
	.free			= arc_dma_free,
	.mmap			= arc_dma_mmap,
	.map_page		= arc_dma_map_page,
	.unmap_page		= arc_dma_unmap_page,
	.map_sg			= arc_dma_map_sg,
	.unmap_sg		= arc_dma_unmap_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
	.dma_supported		= arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);
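
/*
 * Usage sketch (illustrative only, kept out of the build): a minimal example
 * of how a hypothetical driver would reach the ops above through the generic
 * DMA API. dma_alloc_coherent()/dma_free_coherent() end up in
 * arc_dma_alloc()/arc_dma_free(); dma_map_single()/dma_unmap_single() end up
 * in arc_dma_map_page()/arc_dma_unmap_page(). example_dma_usage(), @dev and
 * @buf (a kmalloc'd kernel buffer) are assumptions for the example and are
 * not referenced anywhere in this file.
 */
#if 0	/* example only */
static int example_dma_usage(struct device *dev, void *buf, size_t size)
{
	dma_addr_t coh_handle, str_handle;
	void *coh_buf;

	/* Coherent buffer: uncached kernel mapping, no explicit syncs needed */
	coh_buf = dma_alloc_coherent(dev, size, &coh_handle, GFP_KERNEL);
	if (!coh_buf)
		return -ENOMEM;

	/* Streaming buffer @buf: cache is written back before device owns it */
	str_handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, str_handle)) {
		dma_free_coherent(dev, size, coh_buf, coh_handle);
		return -ENOMEM;
	}

	/* ... program the device with coh_handle / str_handle here ... */

	/* CPU regains ownership of the streaming buffer */
	dma_unmap_single(dev, str_handle, size, DMA_TO_DEVICE);
	dma_free_coherent(dev, size, coh_buf, coh_handle);
	return 0;
}
#endif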