// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

bool dma_default_coherent;

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}
/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
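/*
 * Example (editor's illustrative sketch, not part of this file): a probe
 * routine can rely on devres to release the buffer on driver detach, so no
 * explicit free is needed on any exit path.  "pdev" and the size are
 * placeholders.
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dmam_alloc_attrs(&pdev->dev, SZ_4K, &dma, GFP_KERNEL, 0);
 *	if (!buf)
 *		return -ENOMEM;
 *	(use buf/dma for the device's lifetime; devres frees it on detach)
 */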
static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	kmsan_handle_dma(page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
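/*
 * Example (editor's illustrative sketch): the usual streaming pattern for a
 * single page - map, check with dma_mapping_error(), use, unmap.  The
 * surrounding driver context (dev, page) is assumed.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	(hand addr to the hardware and wait for completion)
 *	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_TO_DEVICE, 0);
 */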
static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0) {
		kmsan_handle_dma_sg(sg, nents, dir);
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		return -EIO;
	}

	return ents;
}
/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sg:		The scatterlist describing the buffer
 * @nents:	Number of entries to map
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
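/*
 * Example (editor's illustrative sketch): iterate over the *returned*
 * number of entries, but unmap with the original nents.
 * program_hw_desc() is a stand-in for device-specific descriptor setup.
 *
 *	struct scatterlist *s;
 *	unsigned int mapped;
 *	int i;
 *
 *	mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, mapped, i)
 *		program_hw_desc(sg_dma_address(s), sg_dma_len(s));
 *	(after the transfer completes:)
 *	dma_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 */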
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL		An invalid argument, unaligned access or other error
 *			in usage. Will not succeed if retried.
 *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
 *			complete the mapping. Should succeed if retried later.
 *   -EIO		Legacy error code with an unknown meaning, e.g. this is
 *			returned if a lower level call returned
 *			DMA_MAPPING_ERROR.
 *   -EREMOTEIO		The DMA device cannot access P2PDMA memory specified
 *			in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
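/*
 * Example (editor's illustrative sketch): unlike dma_map_sg_attrs(), the
 * error code is preserved here, so callers can treat -ENOMEM as retryable
 * and everything else as fatal.
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	(program the device using sgt->sgl and sgt->nents)
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */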
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
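/*
 * Example (editor's illustrative sketch): a driver that keeps a buffer
 * mapped and reuses it must bounce ownership between CPU and device with
 * the sync calls instead of remapping.  process_rx() is a stand-in for
 * driver-specific work.
 *
 *	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
 *	process_rx(buf, size);		(the CPU owns the buffer here)
 *	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
 *	(the device owns the buffer again; hand it back to the hardware)
 */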
/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatter/gather table.  This presents a couple of problems:
 *  1. Not all memory allocated via the coherent DMA APIs is backed by
 *     a struct page
 *  2. Passing coherent DMA memory into the streaming APIs is not allowed
 *     as we will try to flush the memory through a different alias to that
 *     actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */
/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
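/*
 * Example (editor's illustrative sketch): a file_operations ->mmap handler
 * exporting a coherent buffer to userspace.  "struct foo" and its fields
 * are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		if (!dma_can_mmap(foo->dev))
 *			return -ENXIO;
 *		return dma_mmap_attrs(foo->dev, vma, foo->cpu_addr,
 *				      foo->dma_addr, foo->size, 0);
 *	}
 */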
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
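/*
 * Example (editor's illustrative sketch): dma_alloc_coherent() is simply
 * this interface with attrs == 0, which is what most drivers use.
 *
 *	void *cpu;
 *	dma_addr_t dma;
 *
 *	cpu = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL, 0);
 *	if (!cpu)
 *		return -ENOMEM;
 *	(both CPU and device may access the buffer without further syncs)
 *	dma_free_attrs(dev, size, cpu, dma, 0);
 */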
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);
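/*
 * Example (editor's illustrative sketch): page-backed, non-coherent
 * allocation; the caller is responsible for the usual dma_sync_single_*()
 * ownership transfers when accessing it.  SZ_64K is a placeholder size.
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = dma_alloc_pages(dev, SZ_64K, &dma, DMA_BIDIRECTIONAL,
 *			       GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	(use page_address(page) on the CPU side, dma on the device side)
 *	dma_free_pages(dev, SZ_64K, page, dma, DMA_BIDIRECTIONAL);
 */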
static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);
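/*
 * Example (editor's illustrative sketch): allocate possibly disjoint pages,
 * then obtain a contiguous kernel view when one is needed.  Teardown runs
 * in reverse order.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr) {
 *		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 *		return -ENOMEM;
 *	}
 *	(use vaddr on the CPU, the sgt entries on the device)
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */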
static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous) {
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* if ops is not set, dma direct will be used which supports P2PDMA */
	if (!ops)
		return true;

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
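/*
 * Example (editor's illustrative sketch): the classic probe-time pattern,
 * preferring a 64-bit mask and falling back to 32-bit.  Most drivers use
 * the dma_set_mask_and_coherent() helper, which sets both masks at once.
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */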
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
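/*
 * Example (editor's illustrative sketch): storage drivers can cap their
 * transfer size to what the IOMMU maps most efficiently.  "shost" stands
 * in for a SCSI host; SECTOR_SHIFT converts bytes to 512-byte sectors.
 *
 *	shost->max_sectors = min_t(unsigned int, shost->max_sectors,
 *				   dma_opt_mapping_size(dev) >> SECTOR_SHIFT);
 */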