// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

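/*
 * Note: dmam_match() is the devres match callback used by
 * dmam_free_coherent() below.  devres_destroy() walks the device's
 * resource list, calls the callback on each dma_devres entry until one
 * returns 1, and then removes that entry without invoking dmam_release()
 * again (the memory has already been freed by hand at that point).  A
 * minimal sketch of that lookup, assuming size/vaddr/dma_handle describe
 * an allocation previously made with dmam_alloc_attrs():
 *
 *	struct dma_devres key = { size, vaddr, dma_handle };
 *	int err;
 *
 *	err = devres_destroy(dev, dmam_release, dmam_match, &key);
 *
 * Here err is 0 if a matching entry was found and removed, or -ENOENT if
 * the buffer was never devres-managed.
 */
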
/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);

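/*
 * Example (a hedged sketch, not part of this file): a driver that needs a
 * DMA buffer for the lifetime of its binding can allocate it in probe and
 * never free it explicitly; the devres core calls dmam_release() on
 * detach.  The "foo" names below are hypothetical:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		foo->buf = dmam_alloc_attrs(dev, SZ_4K, &foo->dma,
 *					    GFP_KERNEL, 0);
 *		if (!foo->buf)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * No matching free is needed in the remove path; dmam_free_coherent() is
 * only for the rare case where the buffer must go away before detach.
 */
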
/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist.  This presents a couple of problems:
 *  1. Not all memory allocated via the coherent DMA APIs is backed by
 *     a struct page
 *  2. Passing coherent DMA memory into the streaming APIs is not allowed
 *     as we will try to flush the memory through a different alias to that
 *     actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

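/*
 * A minimal usage sketch (illustrative only): exporting a coherent
 * allocation as a single-entry sg_table, e.g. for a dma-buf exporter.
 * dma_get_sgtable() is the attrs == 0 wrapper around
 * dma_get_sgtable_attrs():
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_addr, size);
 *	if (ret)
 *		return ret;
 *	(hand sgt.sgl to the importer, then sg_free_table(&sgt) when done)
 *
 * Given the caveats in the comment above, only do this for allocations
 * known to be backed by struct pages, and never feed the result back into
 * the streaming mapping APIs.
 */
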
#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev) ||
	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	     (attrs & DMA_ATTR_NON_CONSISTENT)))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}

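/*
 * Worked example of the bounds check above (a sketch; the file descriptor
 * is hypothetical and assumed to forward its mmap here): for a two-page
 * allocation, count == 2.  Userspace mapping one page at file offset
 * PAGE_SIZE yields vm_pgoff == 1 and user_count == 1, which passes
 * (1 < 2 and 1 <= 2 - 1); asking for two pages at that offset would be
 * rejected with -ENXIO:
 *
 *	buf = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, PAGE_SIZE);
 */
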
/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);

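/*
 * A sketch of the intended use from a driver's ->mmap file operation
 * (the "foo" structure and its fields are hypothetical):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		if (!dma_can_mmap(foo->dev))
 *			return -ENXIO;
 *		return dma_mmap_attrs(foo->dev, vma, foo->cpu_addr,
 *				      foo->dma_addr, foo->size, 0);
 *	}
 *
 * dma_mmap_coherent() is the attrs == 0 wrapper for this call.
 */
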
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

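/*
 * A minimal allocation/free sketch (illustrative only).  The same size and
 * attrs passed to dma_alloc_attrs() must be passed to dma_free_attrs()
 * below; dma_alloc_coherent()/dma_free_coherent() are thin wrappers around
 * these two functions:
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE);
 *	if (!buf)
 *		return -ENOMEM;
 *	(... program the device with "dma", use "buf" from the CPU ...)
 *	dma_free_attrs(dev, size, buf, dma, DMA_ATTR_WRITE_COMBINE);
 */
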
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

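/*
 * Typical probe-time usage of the mask setters (a hedged sketch): try the
 * widest mask the device can address and fall back to 32-bit, which every
 * implementation is required to support.  Both the streaming and the
 * coherent mask should be configured:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * dma_set_mask_and_coherent() in <linux/dma-mapping.h> combines the two
 * calls.
 */
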
#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_is_direct(ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);

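/*
 * Usage sketch for the two queries above (illustrative; the block-layer
 * helpers are real, but the surrounding driver context is hypothetical).
 * A storage driver can cap its request sizes with dma_max_mapping_size()
 * and pass dma_get_merge_boundary() to the block layer as the virtual
 * boundary governing segment merging, where 0 means no merging is
 * possible:
 *
 *	shost->max_sectors = min_t(size_t, shost->max_sectors,
 *			dma_max_mapping_size(dev) >> SECTOR_SHIFT);
 *	blk_queue_virt_boundary(q, dma_get_merge_boundary(dev));
 */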