// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}
/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
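
/*
 * Example (illustrative sketch only, not part of this file; the "foo" names
 * are hypothetical): a driver allocating a descriptor ring with the managed
 * API.  The buffer is freed automatically when the driver detaches, so no
 * dmam_free_coherent() call is needed in the error or remove paths.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(&pdev->dev);
 *
 *		priv->ring = dmam_alloc_attrs(&pdev->dev, FOO_RING_BYTES,
 *					      &priv->ring_dma, GFP_KERNEL, 0);
 *		if (!priv->ring)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */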

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		unsigned long pfn;

		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		/* If the PFN is not valid, we do not have a struct page */
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
		if (!pfn_valid(pfn))
			return -ENXIO;
		page = pfn_to_page(pfn);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
			attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
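
/*
 * Example (illustrative sketch only; the "foo" names are hypothetical):
 * exporting a coherent allocation as a single-entry scatterlist, e.g. to
 * hand the buffer to an API that consumes sg_tables.  The table only
 * describes the buffer; the caller still owns the underlying memory and
 * must call sg_free_table() when done with the table.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(dev, &sgt, foo->cpu_addr, foo->dma_addr,
 *			      foo->size);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */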

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev) ||
	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	     (attrs & DMA_ATTR_NON_CONSISTENT)))
		return prot;
	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
		return arch_dma_mmap_pgprot(dev, prot, attrs);
	return pgprot_noncached(prot);
}
#endif /* CONFIG_MMU */

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		/* If the PFN is not valid, we do not have a struct page */
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
		if (!pfn_valid(pfn))
			return -ENXIO;
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}
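
/*
 * Example (illustrative sketch only; the "foo" names are hypothetical): a
 * character device mmap handler backed by a coherent allocation.  A call to
 * dma_mmap_coherent() (a wrapper around dma_mmap_attrs() defined in
 * <linux/dma-mapping.h>) ends up in dma_common_mmap() above unless the
 * device's dma_map_ops instance provides its own ->mmap method.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_addr, priv->size);
 *	}
 */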

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);

static u64 dma_default_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
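
/*
 * Example (illustrative sketch only): consulting the required mask at probe
 * time to avoid needlessly expensive 64-bit addressing when all of system
 * memory is reachable through a 32-bit mask.
 *
 *	if (sizeof(dma_addr_t) > 4 &&
 *	    dma_get_required_mask(dev) > DMA_BIT_MASK(32))
 *		err = dma_set_mask(dev, DMA_BIT_MASK(64));
 *	else
 *		err = dma_set_mask(dev, DMA_BIT_MASK(32));
 */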

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);
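
/*
 * Example (illustrative sketch only; the "foo" names are hypothetical): an
 * unmanaged coherent allocation with a DMA_ATTR_* flag.  The free must use
 * the same size and attrs as the allocation.
 *
 *	foo->buf = dma_alloc_attrs(dev, foo->size, &foo->dma_addr,
 *				   GFP_KERNEL, DMA_ATTR_NO_WARN);
 *	if (!foo->buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, foo->size, foo->buf, foo->dma_addr,
 *		       DMA_ATTR_NO_WARN);
 */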

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	dma_check_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_is_direct(ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
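
/*
 * Example (illustrative sketch only): typical probe-time mask setup.  The
 * dma_set_mask_and_coherent() helper from <linux/dma-mapping.h> combines
 * dma_set_mask() and dma_set_coherent_mask(); drivers commonly try a wide
 * mask first and fall back to 32 bits when it is not supported.
 *
 *	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (err)
 *		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (err)
 *		return err;
 */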