/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>

static inline void __dma_sync_for_device(void *vaddr, size_t size,
				enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * We just need to flush the caches here, but the Nios2 flush
		 * instruction will do both writeback and invalidate.
		 */
	case DMA_BIDIRECTIONAL: /* flush and invalidate */
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}

static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
				enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		break;
	default:
		BUG();
	}
}

static void *nios2_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* optimized page clearing */
	gfp |= __GFP_ZERO;

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		*dma_handle = virt_to_phys(ret);
		/* Write back the zeroed pages, then hand out an uncached alias */
		flush_dcache_range((unsigned long) ret,
			(unsigned long) ret + size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);

	free_pages(addr, get_order(size));
}

static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int i;

	for_each_sg(sg, sg, nents, i) {
		void *addr;

		addr = sg_virt(sg);
		if (addr) {
			__dma_sync_for_device(addr, sg->length, direction);
			sg->dma_address = sg_phys(sg);
		}
	}

	return nents;
}

static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction,
			struct dma_attrs *attrs)
{
	void *addr = page_address(page) + offset;

	__dma_sync_for_device(addr, size, direction);
	return page_to_phys(page) + offset;
}

static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}

static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	void *addr;
	int i;

	if (direction == DMA_TO_DEVICE)
		return;

	for_each_sg(sg, sg, nhwentries, i) {
		addr = sg_virt(sg);
		if (addr)
			__dma_sync_for_cpu(addr, sg->length, direction);
	}
}

static void nios2_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}

static void nios2_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}

static void nios2_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}

static void nios2_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body. */
	for_each_sg(sg, sg, nelems, i)
		__dma_sync_for_device(sg_virt(sg), sg->length, direction);
}

struct dma_map_ops nios2_dma_ops = {
	.alloc			= nios2_dma_alloc,
	.free			= nios2_dma_free,
	.map_page		= nios2_dma_map_page,
	.unmap_page		= nios2_dma_unmap_page,
	.map_sg			= nios2_dma_map_sg,
	.unmap_sg		= nios2_dma_unmap_sg,
	.sync_single_for_device	= nios2_dma_sync_single_for_device,
	.sync_single_for_cpu	= nios2_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= nios2_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= nios2_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(nios2_dma_ops);