/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>

/* Make a buffer consistent for the device before DMA starts. */
static inline void __dma_sync_for_device(void *vaddr, size_t size,
				enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * We just need to flush the caches here, but Nios2 flush
		 * instruction will do both writeback and invalidate.
		 */
	case DMA_BIDIRECTIONAL: /* flush and invalidate */
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}

/* Make a buffer consistent for the CPU after DMA has completed. */
static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
				enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/* The device only read the buffer; the CPU copy is still valid. */
		break;
	default:
		BUG();
	}
}

static void *nios2_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* optimized page clearing */
	gfp |= __GFP_ZERO;

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		*dma_handle = virt_to_phys(ret);
		flush_dcache_range((unsigned long) ret,
			(unsigned long) ret + size);
		/* Return the uncached alias of the buffer. */
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}

static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;

	for_each_sg(sg, sg, nents, i) {
		void *addr = sg_virt(sg);

		if (!addr)
			continue;

		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync_for_device(addr, sg->length, direction);
	}

	return nents;
}

static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction,
			unsigned long attrs)
{
	void *addr = page_address(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_for_device(addr, size, direction);

	return page_to_phys(page) + offset;
}

static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}

static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction,
		unsigned long attrs)
{
	void *addr;
	int i;

	if (direction == DMA_TO_DEVICE)
		return;

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	for_each_sg(sg, sg, nhwentries, i) {
		addr = sg_virt(sg);
		if (addr)
			__dma_sync_for_cpu(addr, sg->length, direction);
	}
}

static void nios2_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}

static void nios2_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}

static void nios2_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}

static void nios2_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_device(sg_virt(sg), sg->length, direction);
}

const struct dma_map_ops nios2_dma_ops = {
	.alloc			= nios2_dma_alloc,
	.free			= nios2_dma_free,
	.map_page		= nios2_dma_map_page,
	.unmap_page		= nios2_dma_unmap_page,
	.map_sg			= nios2_dma_map_sg,
	.unmap_sg		= nios2_dma_unmap_sg,
	.sync_single_for_device	= nios2_dma_sync_single_for_device,
	.sync_single_for_cpu	= nios2_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= nios2_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= nios2_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(nios2_dma_ops);
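
/*
 * Illustrative sketch: how a consumer typically reaches the ops above
 * through the generic DMA API rather than calling them directly. The
 * device pointer and buffer size are hypothetical, and the block is
 * guarded by "#if 0" so it is never built.
 */
#if 0
static int nios2_dma_example(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* Dispatches to nios2_dma_alloc(): zeroed buffer, uncached mapping. */
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... hand 'handle' to the device and let it perform DMA ... */

	/* Dispatches to nios2_dma_free(). */
	dma_free_coherent(dev, PAGE_SIZE, buf, handle);

	return 0;
}
#endif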