// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}
/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
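/*
 * Example (illustrative sketch, not part of the kernel sources): a driver
 * using the managed variant needs no explicit free in its error paths or in
 * its ->remove() callback; devres releases the buffer on detach.  "foo_probe"
 * and the 4K size are hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dma_addr_t dma;
 *		void *ring;
 *
 *		ring = dmam_alloc_attrs(&pdev->dev, SZ_4K, &dma,
 *					GFP_KERNEL, 0);
 *		if (!ring)
 *			return -ENOMEM;
 *		// on any later failure, just return: dmam_release() cleans up
 *		return 0;
 *	}
 */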
static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
	return likely(!ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
EXPORT_SYMBOL(dma_map_sg_attrs);

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_is_direct(ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);
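/*
 * Example (sketch, hypothetical driver code): a streaming mapping must be
 * checked with dma_mapping_error() before use and unmapped with the same
 * size and direction it was mapped with:
 *
 *	dma_addr_t addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
 *					     DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	// ... point the device at "addr" and run the transfer ...
 *	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_TO_DEVICE, 0);
 */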
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_is_direct(ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_is_direct(ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);
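/*
 * Example (sketch): dma_map_resource() is for physical ranges that are not
 * RAM, e.g. handing another device's MMIO FIFO to a DMA engine.  The
 * resource "res" and device "dma_dev" below are hypothetical:
 *
 *	dma_addr_t fifo = dma_map_resource(dma_dev, res->start,
 *					   resource_size(res),
 *					   DMA_BIDIRECTIONAL, 0);
 *	if (dma_mapping_error(dma_dev, fifo))
 *		return -ENXIO;
 *	// ... program the DMA engine with "fifo" as its target address ...
 *	dma_unmap_resource(dma_dev, fifo, resource_size(res),
 *			   DMA_BIDIRECTIONAL, 0);
 */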
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
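/*
 * Example (sketch): a buffer that stays mapped across transfers must have
 * its ownership bounced between device and CPU with the sync calls.
 * "process_data" is a hypothetical helper:
 *
 *	// device finished writing; give the buffer to the CPU
 *	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
 *	process_data(vaddr, size);
 *	// hand it back before the next device transfer
 *	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
 */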
/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist.  This presents a couple of problems:
 *  1. Not all memory allocated via the coherent DMA APIs is backed by
 *     a struct page
 *  2. Passing coherent DMA memory into the streaming APIs is not allowed
 *     as we will try to flush the memory through a different alias to that
 *     actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
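/*
 * Example (sketch): exporting a coherent allocation through an sg_table,
 * e.g. on a dma-buf export path.  Error handling trimmed:
 *
 *	struct sg_table sgt;
 *
 *	if (dma_get_sgtable_attrs(dev, &sgt, cpu_addr, dma_addr, size, 0))
 *		return -ENXIO;
 *	// ... hand &sgt to the importer; sg_free_table(&sgt) when done ...
 */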
#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev) ||
	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	     (attrs & DMA_ATTR_NON_CONSISTENT)))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
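/*
 * Example (sketch): a subsystem can probe for mmap support up front before
 * advertising an mmap-capable interface to userspace:
 *
 *	if (!dma_can_mmap(dev))
 *		return -ENXIO;	// fall back to read()/write() style access
 */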
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
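/*
 * Example (sketch): wiring dma_mmap_attrs() into a driver's ->mmap file
 * operation.  "foo" and its fields are hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		return dma_mmap_attrs(foo->dev, vma, foo->cpu_addr,
 *				      foo->dma_addr, foo->size, 0);
 *	}
 */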
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
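/*
 * Example (sketch): a typical coherent allocation for a descriptor ring,
 * paired with the matching free (defined below).  Names are hypothetical:
 *
 *	ring = dma_alloc_attrs(dev, ring_size, &ring_dma, GFP_KERNEL, 0);
 *	if (!ring)
 *		return -ENOMEM;
 *	// CPU and device see the buffer coherently; no sync calls needed
 *	...
 *	dma_free_attrs(dev, ring_size, ring, ring_dma, 0);
 */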
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap().  Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
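/*
 * Example (sketch): the common probe-time pattern is to try a wide mask and
 * fall back to 32-bit.  dma_set_mask_and_coherent() from
 * <linux/dma-mapping.h> sets both the streaming and the coherent mask:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;	// device cannot DMA at all on this platform
 */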
#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_is_direct(ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
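/*
 * Example (sketch): a block driver can clamp its per-request size to what
 * the DMA layer (e.g. swiotlb bouncing) can map in one go.  "q" and the
 * block-layer calls are shown only for illustration:
 *
 *	unsigned int max_bytes = min_t(size_t, dma_max_mapping_size(dev),
 *				       UINT_MAX);
 *
 *	blk_queue_max_hw_sectors(q, max_bytes >> SECTOR_SHIFT);
 */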