// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);
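
/*
 * Example (illustrative sketch, not part of this file): a driver's probe
 * routine might allocate a descriptor ring with the managed API and rely
 * on devres to free it on detach.  The names "priv" and "RING_BYTES" are
 * hypothetical:
 *
 *	priv->ring = dmam_alloc_coherent(&pdev->dev, RING_BYTES,
 *					 &priv->ring_dma, GFP_KERNEL);
 *	if (!priv->ring)
 *		return -ENOMEM;
 *	// no explicit free needed; dmam_release() runs on driver detach
 */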

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
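
/*
 * Example (illustrative sketch): the managed attrs variant is the same
 * call with DMA_ATTR_* flags added, e.g. a write-combined framebuffer;
 * "fb", "fb_dma" and "FB_BYTES" are hypothetical:
 *
 *	fb = dmam_alloc_attrs(&pdev->dev, FB_BYTES, &fb_dma, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE);
 */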

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (!rc)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
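
/*
 * Example (illustrative sketch): a platform driver with device-local
 * SRAM described by a struct resource "res" might declare it as the
 * device's coherent pool; the DMA_MEMORY_EXCLUSIVE flag shown here is
 * an assumption about the caller's intent (route all coherent
 * allocations for this device into the declared region):
 *
 *	rc = dmam_declare_coherent_memory(&pdev->dev, res->start,
 *					  res->start, resource_size(res),
 *					  DMA_MEMORY_EXCLUSIVE);
 */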

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory().
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
				dma_addr));
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
			attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
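
/*
 * Example (illustrative sketch): exporting a coherent buffer to another
 * API that consumes scatterlists; "vaddr" and "dma" come from a prior
 * dma_alloc_coherent() and the error handling is elided:
 *
 *	struct sg_table sgt;
 *
 *	rc = dma_get_sgtable_attrs(dev, &sgt, vaddr, dma, size, 0);
 *	if (!rc) {
 *		... hand sgt.sgl to the consumer ...
 *		sg_free_table(&sgt);
 *	}
 */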

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
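
/*
 * Example (illustrative sketch): a character device can hand a coherent
 * buffer to user space from its ->mmap() file operation; the "foo_priv"
 * structure holding a buffer from dma_alloc_attrs() is hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_attrs(priv->dev, vma, priv->vaddr,
 *				      priv->dma, priv->size, 0);
 *	}
 */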

#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 dma_default_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif
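
/*
 * Worked example of the default computation above: with 2 GiB of RAM and
 * 4 KiB pages, max_pfn - 1 is 0x7ffff, so low_totalram is 0x7ffff000 and
 * high_totalram is 0.  fls(0x7ffff000) is 31, so low_totalram is rounded
 * down to 0x40000000 and then widened to 0x7fffffff: a 31-bit mask that
 * just covers all of memory.
 */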

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)	(true)
#endif

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);
	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (!arch_dma_alloc_attrs(&dev))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap().  Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
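
/*
 * Example (illustrative sketch): unlike the dmam_* variants above, the
 * unmanaged pair must be matched by hand, and the free side must not run
 * in IRQ context:
 *
 *	buf = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL, 0);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, size, buf, dma, 0);
 */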

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifndef HAVE_ARCH_DMA_SET_MASK
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#endif

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
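
/*
 * Example (illustrative sketch): typical mask negotiation in a PCI
 * driver's probe, falling back from 64-bit to 32-bit addressing:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
 *	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
 *		    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *	}
 */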