--- mapping.c (082f20b21de20285da2cbfc1be29656f0714c1b8)
+++ mapping.c (c2bbf9d1e9ac7d4fdd503b190bc1ba8a6302bc49)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * arch-independent dma-mapping routines
  *
  * Copyright (c) 2006 SUSE Linux Products GmbH
  * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
  */
 #include <linux/memblock.h>        /* for max_pfn */

--- 142 unchanged lines hidden ---

         if (WARN_ON_ONCE(!dev->dma_mask))
                 return DMA_MAPPING_ERROR;

         if (dma_map_direct(dev, ops) ||
             arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
                 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
         else
                 addr = ops->map_page(dev, page, offset, size, dir, attrs);
-        debug_dma_map_page(dev, page, offset, size, dir, addr);
+        debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

         return addr;
 }
 EXPORT_SYMBOL(dma_map_page_attrs);
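Each hunk in this diff threads the caller's mapping attributes through to the corresponding debug_dma_*() hook, presumably so dma-debug can honour attributes such as DMA_ATTR_SKIP_CPU_SYNC. A minimal sketch of the public API this hunk instruments; dev, page, offset and len are assumed caller-supplied values, and the attribute is only an example:

        dma_addr_t dma;

        dma = dma_map_page_attrs(dev, page, offset, len, DMA_TO_DEVICE,
                                 DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(dev, dma))        /* must be checked before dma is used */
                return -EIO;
        /* ... device DMA using dma ... */
        dma_unmap_page_attrs(dev, dma, len, DMA_TO_DEVICE,
                             DMA_ATTR_SKIP_CPU_SYNC);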
 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
                 enum dma_data_direction dir, unsigned long attrs)
 {

--- 22 unchanged lines hidden ---

         if (dma_map_direct(dev, ops) ||
             arch_dma_map_sg_direct(dev, sg, nents))
                 ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
         else
                 ents = ops->map_sg(dev, sg, nents, dir, attrs);

         if (ents > 0)
-                debug_dma_map_sg(dev, sg, nents, ents, dir);
+                debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
         else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
                               ents != -EIO))
                 return -EIO;

         return ents;
 }
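__dma_map_sg_attrs() is the internal helper; the public dma_map_sg() wrapper (in the hidden lines) reports every failure as a return value of 0 rather than a negative errno. A sketch of a typical caller, with dev, sgl and nents assumed to come from the caller:

        int ents;

        ents = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
        if (ents == 0)                /* dma_map_sg() folds all errors into 0 */
                return -EIO;
        /* ... program the device with the ents mapped segments ... */
        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);   /* original nents, not ents */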

 /**
--- 37 unchanged lines hidden ---
  * ownership for the buffer is transferred to the DMA domain.  One has to
  * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
  * ownership of the buffer back to the CPU domain before touching the
  * buffer by the CPU.
  *
  * Returns 0 on success or a negative error code on error. The following
  * error codes are supported with the given meaning:
  *
- *   -EINVAL - An invalid argument, unaligned access or other error
- *             in usage. Will not succeed if retried.
- *   -ENOMEM - Insufficient resources (like memory or IOVA space) to
- *             complete the mapping. Should succeed if retried later.
- *   -EIO    - Legacy error code with an unknown meaning. eg. this is
- *             returned if a lower level call returned DMA_MAPPING_ERROR.
+ *   -EINVAL   An invalid argument, unaligned access or other error
+ *             in usage. Will not succeed if retried.
+ *   -ENOMEM   Insufficient resources (like memory or IOVA space) to
+ *             complete the mapping. Should succeed if retried later.
+ *   -EIO      Legacy error code with an unknown meaning. eg. this is
+ *             returned if a lower level call returned DMA_MAPPING_ERROR.
  */
 int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                 enum dma_data_direction dir, unsigned long attrs)
 {
         int nents;

         nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
         if (nents < 0)

--- 34 unchanged lines hidden ---
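The documented error codes above define a retry contract: only -ENOMEM is worth retrying. A sketch of a caller honouring that contract; dev and a populated sg_table are assumed, and schedule_retry() is a hypothetical helper standing in for the caller's deferral mechanism:

        int ret;

        ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        if (ret) {
                if (ret == -ENOMEM)
                        schedule_retry(sgt);    /* hypothetical: may succeed later */
                return ret;                     /* -EINVAL/-EIO: will not succeed if retried */
        }
        /* ... device DMA, then hand ownership back to the CPU ... */
        dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);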
         if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
                 return DMA_MAPPING_ERROR;

         if (dma_map_direct(dev, ops))
                 addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
         else if (ops->map_resource)
                 addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

-        debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+        debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
         return addr;
 }
 EXPORT_SYMBOL(dma_map_resource);
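dma_map_resource() maps a physical address with no struct page behind it (the pfn_valid() check above rejects ordinary RAM), typically device MMIO such as a PCI BAR, e.g. for peer-to-peer DMA. A sketch, with bar_phys and bar_len as assumed caller-provided values:

        dma_addr_t dma;

        dma = dma_map_resource(dev, bar_phys, bar_len, DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, dma))
                return -EIO;
        /* ... a peer device DMAs to/from the region via dma ... */
        dma_unmap_resource(dev, dma, bar_len, DMA_BIDIRECTIONAL, 0);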
 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
                 enum dma_data_direction dir, unsigned long attrs)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);

--- 188 unchanged lines hidden ---

         if (dma_alloc_direct(dev, ops))
                 cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
         else if (ops->alloc)
                 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
         else
                 return NULL;

-        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
         return cpu_addr;
 }
 EXPORT_SYMBOL(dma_alloc_attrs);
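Coherent allocations follow the usual allocate/check/free pattern; dma_alloc_coherent() is the attrs == 0 shorthand for this call. A sketch, with size an assumed caller value:

        dma_addr_t handle;
        void *cpu;

        cpu = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, 0);
        if (!cpu)
                return -ENOMEM;
        /* ... cpu for CPU accesses, handle for the device ... */
        dma_free_attrs(dev, size, cpu, handle, 0);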
 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                 dma_addr_t dma_handle, unsigned long attrs)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);
--- 39 unchanged lines hidden ---
 }

 struct page *dma_alloc_pages(struct device *dev, size_t size,
                 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
 {
         struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

         if (page)
-                debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
+                debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
         return page;
 }
 EXPORT_SYMBOL_GPL(dma_alloc_pages);
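Note that dma_alloc_pages() takes no attrs parameter, so the debug hook is passed a literal 0 here rather than caller attributes. A usage sketch, with size assumed caller-supplied:

        dma_addr_t handle;
        struct page *page;

        page = dma_alloc_pages(dev, size, &handle, DMA_TO_DEVICE, GFP_KERNEL);
        if (!page)
                return -ENOMEM;
        /* ... page_address(page) for the CPU, handle for the device ... */
        dma_free_pages(dev, size, page, handle, DMA_TO_DEVICE);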
 static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
                 dma_addr_t dma_handle, enum dma_data_direction dir)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);

--- 61 unchanged lines hidden ---

         if (ops && ops->alloc_noncontiguous)
                 sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
         else
                 sgt = alloc_single_sgt(dev, size, dir, gfp);

         if (sgt) {
                 sgt->nents = 1;
-                debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir);
+                debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
         }
         return sgt;
 }
 EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);
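dma_alloc_noncontiguous() returns an sg_table already mapped for the device; the alloc_single_sgt() fallback above is used when the DMA ops don't implement noncontiguous allocation. A sketch, with size assumed caller-supplied; the allocation stays mapped for the given direction until freed:

        struct sg_table *sgt;

        sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
                                      GFP_KERNEL, 0);
        if (!sgt)
                return -ENOMEM;
        /* ... sgt->sgl describes the pages; the mapped (device-visible)
         * segments are walked with for_each_sgtable_dma_sg() ... */
        dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);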

 static void free_single_sgt(struct device *dev, size_t size,
                 struct sg_table *sgt, enum dma_data_direction dir)
 {
--- 148 unchanged lines hidden ---