// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

bool dma_default_coherent;

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
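
/*
 * Example: a minimal sketch of how a driver might use the managed
 * allocator above.  The platform device and ring buffer are hypothetical;
 * the point is that no matching free is needed on the error or teardown
 * paths, because devres releases the memory on driver detach:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dma_addr_t ring_dma;
 *		void *ring;
 *
 *		ring = dmam_alloc_attrs(&pdev->dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL, 0);
 *		if (!ring)
 *			return -ENOMEM;
 *		(program ring_dma into the device; use "ring" from the CPU)
 *		return 0;
 *	}
 */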

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
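
/*
 * Example: the expected calling convention for the streaming page API
 * above (a sketch; "dev" and "page" are assumed to exist).  Every mapping
 * must be checked with dma_mapping_error() and undone with the same size
 * and direction:
 *
 *	dma_addr_t dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
 *					    DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	(start the transfer and wait for it to complete)
 *	dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_TO_DEVICE, 0);
 */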

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	 int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0)
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
			      ents != -EIO))
		return -EIO;

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sg:		The scatterlist describing the buffer
 * @nents:	Number of entries to map
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
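
/*
 * Example: illustrative use of dma_map_sg_attrs().  Note that the unmap
 * call takes the original nents, not the possibly smaller mapped count,
 * which is only used to walk the list:
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		(program sg_dma_address(sg) / sg_dma_len(sg) into hardware)
 *	...
 *	dma_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 */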

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before the CPU touches
 * the buffer.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL	An invalid argument, unaligned access or other error
 *		in usage. Will not succeed if retried.
 *   -ENOMEM	Insufficient resources (like memory or IOVA space) to
 *		complete the mapping. Should succeed if retried later.
 *   -EIO	Legacy error code with an unknown meaning, e.g. this is
 *		returned if a lower level call returned DMA_MAPPING_ERROR.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		    enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
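
/*
 * Example: sketch of the sg_table flow with error handling keyed to the
 * return codes documented above (allocation of the sg_table is elided):
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret == -ENOMEM)
 *		(back off and retry later)
 *	else if (ret)
 *		(fail the request; retrying cannot help)
 *	(the device now owns the buffer; once the transfer is done:)
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */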

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);
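
/*
 * Example: dma_map_resource() is for physical address ranges that are not
 * RAM, e.g. a peripheral FIFO register used in device-to-device transfers.
 * A hypothetical dmaengine-style setup might look like:
 *
 *	dma_addr_t fifo = dma_map_resource(dev, fifo_phys, sizeof(u32),
 *					   DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, fifo))
 *		return -ENOMEM;
 *	(hand "fifo" to the DMA controller as the destination address)
 *	dma_unmap_resource(dev, fifo, sizeof(u32), DMA_TO_DEVICE, 0);
 */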

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
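
/*
 * Example: the ownership hand-off the sync helpers above implement for a
 * long-lived streaming mapping (an illustrative sketch):
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	(the CPU may now safely read what the device wrote)
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	(the buffer is handed back to the device for the next transfer)
 */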

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
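
/*
 * Example: a hypothetical file_operations ->mmap handler exporting a
 * coherent buffer to userspace, guarded by dma_can_mmap() (struct foo_dev
 * and its fields are illustrative, not part of this file):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		if (!dma_can_mmap(fd->dev))
 *			return -ENXIO;
 *		return dma_mmap_attrs(fd->dev, vma, fd->cpu_addr,
 *				      fd->dma_addr, fd->size, 0);
 *	}
 */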

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
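
/*
 * Example: the canonical unmanaged coherent allocation pattern for the
 * two functions above (names are illustrative):
 *
 *	void *cpu = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL, 0);
 *	if (!cpu)
 *		return -ENOMEM;
 *	(tell the device about "dma"; access the memory through "cpu")
 *	dma_free_attrs(dev, size, cpu, dma, 0);
 */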

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);
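
/*
 * Example: sketch of the non-coherent page allocator above.  Unlike
 * dma_alloc_attrs() it always returns a kernel-addressable struct page,
 * and the caller is responsible for any dma_sync_* calls the platform
 * needs:
 *
 *	struct page *page = dma_alloc_pages(dev, PAGE_SIZE, &dma,
 *					    DMA_BIDIRECTIONAL, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	memset(page_address(page), 0, PAGE_SIZE);
 *	...
 *	dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_BIDIRECTIONAL);
 */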

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous) {
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
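
/*
 * Example: typical life cycle of a noncontiguous allocation using the
 * helpers above (a sketch; error unwinding is abbreviated):
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr)
 *		goto free_sgt;
 *	(CPU access through vaddr, device access through sgt)
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */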

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
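
/*
 * Example: drivers normally set the streaming mask from probe, falling
 * back to 32 bits when the wider mask is not supported (a sketch; the
 * dma_set_mask_and_coherent() helper in <linux/dma-mapping.h> covers the
 * common case of setting both masks at once):
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */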

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
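
/*
 * Example: dma_need_sync() lets hot paths cache whether dma_sync_* calls
 * can be skipped for a mapping (a hedged sketch of the pattern used by
 * recycling allocators such as page_pool; field names are illustrative):
 *
 *	pool->needs_sync = dma_need_sync(dev, addr);
 *	...
 *	if (pool->needs_sync)
 *		dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 */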