// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */

#include <linux/acpi.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
			  dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
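
/*
 * Usage sketch (illustrative only; the "foo" driver, foo_probe(),
 * foo_init_hw() and FOO_RING_BYTES are hypothetical).  Because
 * dmam_alloc_coherent() ties the buffer's lifetime to the device via
 * devres, probe error paths and the remove path need no matching free:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dma_addr_t ring_dma;
 *		void *ring;
 *
 *		ring = dmam_alloc_coherent(&pdev->dev, FOO_RING_BYTES,
 *					   &ring_dma, GFP_KERNEL);
 *		if (!ring)
 *			return -ENOMEM;
 *
 *		return foo_init_hw(pdev, ring, ring_dma);
 *	}
 *
 * On any later probe failure, and on driver detach, devres invokes
 * dmam_release() -> dma_free_attrs() automatically.  An explicit
 * dmam_free_coherent() is only needed to release a buffer early,
 * before the driver is detached.
 */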

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags in the DMA_MEMORY_* namespace
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (!rc)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory()
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
				dma_addr));
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
EXPORT_SYMBOL(dma_common_get_sgtable);
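
/*
 * Usage sketch (illustrative only; error handling trimmed).  Drivers
 * normally reach dma_common_get_sgtable() through dma_get_sgtable(),
 * e.g. to hand an existing coherent buffer to an interface that wants
 * an sg_table:
 *
 *	struct sg_table sgt;
 *	int rc;
 *
 *	rc = dma_get_sgtable(dev, &sgt, cpu_addr, dma_addr, size);
 *	if (rc)
 *		return rc;
 *	...
 *	sg_free_table(&sgt);
 *
 * The table merely describes the already-allocated buffer; freeing the
 * table does not free the underlying DMA memory.
 */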

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}
EXPORT_SYMBOL(dma_common_mmap);

#ifdef CONFIG_MMU
static struct vm_struct *__dma_common_pages_remap(struct page **pages,
			size_t size, unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts.
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts.
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	struct vm_struct *area;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}

/*
 * Unmaps a range previously mapped by dma_common_*_remap.
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}
#endif
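
/*
 * Usage sketch (illustrative only; assumes CONFIG_MMU, an architecture
 * that provides pgprot_noncached(), and a caller that already holds a
 * contiguous allocation, e.g. from CMA).  Arch DMA code typically pairs
 * the remap helpers like this to get an uncached kernel view of the
 * pages:
 *
 *	void *vaddr;
 *
 *	vaddr = dma_common_contiguous_remap(page, size, VM_USERMAP,
 *					    pgprot_noncached(PAGE_KERNEL),
 *					    __builtin_return_address(0));
 *	if (!vaddr)
 *		goto out_free_pages;
 *	...
 *	dma_common_free_remap(vaddr, size, VM_USERMAP);
 *
 * The vm_flags passed to dma_common_free_remap() must match those used
 * at map time, otherwise the WARN() above fires and nothing is
 * unmapped.
 */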