/*
 * Based on linux/arch/arm/mm/dma-mapping.c
 *
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 * dma_direct_ops is used if
 *  - the MMU/MPU is off
 *  - the CPU is v7m without cache support
 *  - the device is coherent
 * otherwise arm_nommu_dma_ops is used.
 *
 * arm_nommu_dma_ops relies on consistent DMA memory (see [1] for how to
 * declare such memory).
 *
 * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
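/*
 * For illustration only: a consistent DMA region as described in [1] could
 * be declared in the device tree roughly as sketched below. The node name,
 * base address and size are hypothetical and must match the platform's
 * memory map; "shared-dma-pool", "no-map" and "linux,dma-default" are the
 * reserved-memory properties used to set up the default coherent pool.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,dma@60000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x60000000 0x100000>;
 *			no-map;
 *			linux,dma-default;
 *		};
 *	};
 */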

static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	void *ret;

	/*
	 * Try the generic allocator first if the caller indicates that
	 * consistency is not required.
	 */

	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);

	ret = dma_alloc_from_global_coherent(size, dma_handle);

	/*
	 * dma_alloc_from_global_coherent() may fail because:
	 *
	 * - no consistent DMA region has been defined, so we can't
	 *   continue;
	 * - there is no space left in the consistent DMA region, and we
	 *   can only fall back to the generic allocator if the caller
	 *   indicates that consistency is not required.
	 */

	WARN_ON_ONCE(ret == NULL);
	return ret;
}

static void arm_nommu_dma_free(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr,
			       unsigned long attrs)
{
	if (attrs & DMA_ATTR_NON_CONSISTENT) {
		dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
	} else {
		int ret = dma_release_from_global_coherent(get_order(size),
							   cpu_addr);

		WARN_ON_ONCE(ret == 0);
	}
}

static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	int ret;

	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	dmac_map_area(__va(paddr), size, dir);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);
		dmac_unmap_area(__va(paddr), size, dir);
	}
}

static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	__dma_page_cpu_to_dev(handle, size, dir);

	return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
	}

	return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

const struct dma_map_ops arm_nommu_dma_ops = {
	.alloc			= arm_nommu_dma_alloc,
	.free			= arm_nommu_dma_free,
	.mmap			= arm_nommu_dma_mmap,
	.map_page		= arm_nommu_dma_map_page,
	.unmap_page		= arm_nommu_dma_unmap_page,
	.map_sg			= arm_nommu_dma_map_sg,
	.unmap_sg		= arm_nommu_dma_unmap_sg,
	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
	return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	if (IS_ENABLED(CONFIG_CPU_V7M)) {
		/*
		 * Cache support for v7m is optional, so the device can be
		 * treated as coherent if no cache has been detected. Note
		 * that it is not enough to check whether the MPU is in use,
		 * since the system memory map is used in the absence of an
		 * MPU.
		 */
		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
	} else {
		/*
		 * Assume coherent DMA if the MMU/MPU has not been set up.
		 */
		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
	}

	dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);

	set_dma_ops(dev, dma_ops);
}

void arch_teardown_dma_ops(struct device *dev)
{
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
core_initcall(dma_debug_do_init);