/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>

static inline void __dma_sync_for_device(void *vaddr, size_t size,
				enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * We just need to flush the caches here, but the Nios2 flush
		 * instruction will do both writeback and invalidate.
		 */
	case DMA_BIDIRECTIONAL: /* flush and invalidate */
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}

static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
				enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		break;
	default:
		BUG();
	}
}

static void *nios2_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* optimized page clearing */
	gfp |= __GFP_ZERO;

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		*dma_handle = virt_to_phys(ret);
		flush_dcache_range((unsigned long) ret,
			(unsigned long) ret + size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}
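
/*
 * Illustrative sketch, not part of the original file: how a driver would
 * reach nios2_dma_alloc()/nios2_dma_free() through the generic DMA API.
 * The device pointer and ring size below are hypothetical placeholders.
 * The allocation above flushes the freshly cleared pages and returns an
 * uncached alias (UNCAC_ADDR), so CPU and device see the same data with
 * no further cache maintenance.
 */
static void *example_alloc_ring(struct device *dev, size_t ring_size,
				dma_addr_t *ring_dma)
{
	/* Ends up in nios2_dma_alloc(); returns an uncached CPU alias. */
	return dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, size_t ring_size,
			      void *ring, dma_addr_t ring_dma)
{
	/* Ends up in nios2_dma_free(); frees the underlying cached pages. */
	dma_free_coherent(dev, ring_size, ring, ring_dma);
}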

static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;

	for_each_sg(sg, sg, nents, i) {
		void *addr = sg_virt(sg);

		if (!addr)
			continue;

		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync_for_device(addr, sg->length, direction);
	}

	return nents;
}

static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction,
			unsigned long attrs)
{
	void *addr = page_address(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_for_device(addr, size, direction);

	return page_to_phys(page) + offset;
}

static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}

static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction,
		unsigned long attrs)
{
	void *addr;
	int i;

	if (direction == DMA_TO_DEVICE)
		return;

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	for_each_sg(sg, sg, nhwentries, i) {
		addr = sg_virt(sg);
		if (addr)
			__dma_sync_for_cpu(addr, sg->length, direction);
	}
}

static void nios2_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}

static void nios2_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}

static void nios2_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}

static void nios2_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_device(sg_virt(sg), sg->length, direction);
}

const struct dma_map_ops nios2_dma_ops = {
	.alloc = nios2_dma_alloc,
	.free = nios2_dma_free,
	.map_page = nios2_dma_map_page,
	.unmap_page = nios2_dma_unmap_page,
	.map_sg = nios2_dma_map_sg,
	.unmap_sg = nios2_dma_unmap_sg,
	.sync_single_for_device = nios2_dma_sync_single_for_device,
	.sync_single_for_cpu = nios2_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu,
	.sync_sg_for_device = nios2_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(nios2_dma_ops);
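
/*
 * Illustrative sketch, not part of the original file: one pass of a
 * streaming-DMA receive path as a driver would write it, showing how the
 * generic DMA API reaches the ops above on this non-coherent platform.
 * The device pointer, buffer and length are hypothetical placeholders.
 * dma_map_single(..., DMA_FROM_DEVICE) lands in nios2_dma_map_page(),
 * which invalidates the buffer's D-cache lines before the device owns it;
 * dma_unmap_single() lands in nios2_dma_unmap_page(), which invalidates
 * again so the CPU does not read stale cache lines afterwards.
 */
static int example_rx_once(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* Hand the buffer to the device; D-cache is synced for the device. */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with 'dma' and wait for completion ... */

	/* Reclaim the buffer; D-cache is invalidated before the CPU reads. */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);

	return 0;
}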