/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/slab.h>

struct cma;

/*
 * Values for struct dma_map_ops.flags:
 *
 * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
 * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
 */
#define DMA_F_PCI_P2PDMA_SUPPORTED     (1 << 0)

struct dma_map_ops {
	unsigned int flags;

	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
			enum dma_data_direction dir, gfp_t gfp,
			unsigned long attrs);
	void (*free_noncontiguous)(struct device *dev, size_t size,
			struct sg_table *sgt, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg should return a negative error code on error. See
	 * dma_map_sgtable() for a list of appropriate error codes
	 * and their meanings.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	size_t (*opt_mapping_size)(void);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
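
/*
 * Illustrative sketch (editor's example, not part of the kernel sources,
 * kept under #if 0): a minimal dma_map_ops instance as a bus layer might
 * define one.  All "my_*" helpers are hypothetical; only the field names
 * come from the structure above.  An implementation whose map_sg/unmap_sg
 * can handle PCI P2PDMA pages would also set DMA_F_PCI_P2PDMA_SUPPORTED
 * in .flags.
 */
#if 0
static const struct dma_map_ops my_bus_dma_ops = {
	.flags			= 0,
	.alloc			= my_dma_alloc,
	.free			= my_dma_free,
	.map_page		= my_map_page,
	.unmap_page		= my_unmap_page,
	/* map_sg returns -EINVAL/-ENOMEM/-EIO/-EREMOTEIO on failure */
	.map_sg			= my_map_sg,
	.unmap_sg		= my_unmap_sg,
	.dma_supported		= my_dma_supported,
	.max_mapping_size	= my_max_mapping_size,
};
#endif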

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops();
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
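
/*
 * Illustrative sketch (editor's example, kept under #if 0): how bus or
 * IOMMU setup code might install per-device ops and how callers resolve
 * them.  "my_bus_dma_ops" refers to the hypothetical instance sketched
 * above; with CONFIG_DMA_OPS=n set_dma_ops() is a no-op and get_dma_ops()
 * always returns NULL, which the core treats as "use dma-direct".
 */
#if 0
static void my_bus_setup_dma(struct device *dev)
{
	set_dma_ops(dev, &my_bus_dma_ops);
}

static bool my_device_has_dma_ops(struct device *dev)
{
	return get_dma_ops(dev) != NULL;
}
#endif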

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
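
/*
 * Illustrative sketch (editor's example, kept under #if 0): the usual
 * calling pattern for the contiguous allocator from a ->alloc style path.
 * With CONFIG_DMA_CMA=n, dma_alloc_contiguous() returns NULL (see the stub
 * above) and the caller falls back to the page allocator; the matching
 * dma_free_contiguous() handles both cases.
 */
#if 0
static struct page *my_alloc_backing_pages(struct device *dev, size_t size,
					   gfp_t gfp)
{
	struct page *page = dma_alloc_contiguous(dev, size, gfp);

	if (!page)
		page = alloc_pages(gfp, get_order(size));
	return page;
}

static void my_free_backing_pages(struct device *dev, struct page *page,
				  size_t size)
{
	dma_free_contiguous(dev, page, size);
}
#endif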

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */
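
/*
 * Illustrative sketch (editor's example, kept under #if 0): a platform
 * driver dedicating a device-local memory window to coherent allocations
 * for one device.  The bus address and size are invented; once declared,
 * dma_alloc_coherent() for this device is satisfied from that window via
 * dma_alloc_from_dev_coherent() before any other allocator is consulted.
 */
#if 0
static int my_probe(struct platform_device *pdev)
{
	/* hypothetical 1 MiB SRAM at bus address 0x40000000 */
	return dma_declare_coherent_memory(&pdev->dev, 0x40000000,
					   0x40000000, 0x100000);
}
#endif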

#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
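
/*
 * Illustrative sketch (editor's example, kept under #if 0): architectures
 * selecting CONFIG_DMA_GLOBAL_POOL typically carve out one uncached region
 * at early init and register it here.  The base address and size below are
 * placeholders, not values from any real platform.
 */
#if 0
static int __init my_arch_coherent_pool_init(void)
{
	return dma_init_global_coherent(0x80000000, 0x100000);
}
core_initcall(my_arch_coherent_pool_init);
#endif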

/*
 * This is the actual return value from the ->alloc_noncontiguous method.
 * The users of the DMA API should only care about the sg_table, but to make
 * the DMA-API internal vmapping and freeing easier we stash away the page
 * array as well (except for the fallback case).  This can go away any time,
 * e.g. when a vmap-variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
	struct sg_table sgt;
	struct page **pages;
};
#define sgt_handle(sgt) \
	container_of((sgt), struct dma_sgt_handle, sgt)
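
/*
 * Illustrative sketch (editor's example, kept under #if 0): an
 * ->alloc_noncontiguous implementation returns the embedded sg_table, and
 * the matching ->free_noncontiguous recovers the handle with sgt_handle().
 * The actual page allocation and mapping steps are deliberately elided.
 */
#if 0
static struct sg_table *my_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	struct dma_sgt_handle *sh = kmalloc(sizeof(*sh), gfp);

	if (!sh)
		return NULL;
	/* ... allocate sh->pages, build and map sh->sgt ... */
	return &sh->sgt;
}

static void my_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	/* ... unmap sh->sgt, free sh->pages ... */
	kfree(sh);
}
#endif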

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
#define dma_default_coherent true

static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */

/*
 * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
 */
static inline bool dma_kmalloc_safe(struct device *dev,
				    enum dma_data_direction dir)
{
	/*
	 * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
	 * caches have already been aligned to a DMA-safe size.
	 */
	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
		return true;

	/*
	 * kmalloc() buffers are DMA-safe irrespective of size if the device
	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
	 * cache maintenance and benign cache line evictions).
	 */
	if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
		return true;

	return false;
}

/*
 * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
 * sufficiently aligned for non-coherent DMA.
 */
static inline bool dma_kmalloc_size_aligned(size_t size)
{
	/*
	 * Larger kmalloc() sizes are guaranteed to be aligned to
	 * ARCH_DMA_MINALIGN.
	 */
	if (size >= 2 * ARCH_DMA_MINALIGN ||
	    IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
		return true;

	return false;
}

/*
 * Check whether the given object size may have originated from a kmalloc()
 * buffer with a slab alignment below the DMA-safe alignment and needs
 * bouncing for non-coherent DMA. The pointer alignment is not considered and
 * in-structure DMA-safe offsets are the responsibility of the caller. Such
 * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
 *
 * The heuristics can have false positives, bouncing unnecessarily, though the
 * buffers would be small. False negatives are theoretically possible if, for
 * example, multiple small kmalloc() buffers are coalesced into a larger
 * buffer that passes the alignment check. There are no such known constructs
 * in the kernel.
 */
static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
					    enum dma_data_direction dir)
{
	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}
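
/*
 * Illustrative sketch (editor's example, kept under #if 0): the intended
 * use of the helper above when a mapping routine decides whether a buffer
 * must be bounced (e.g. through swiotlb) because the kmalloc() minimum
 * alignment may be smaller than the cache line size.  Both "my_*" helpers
 * are hypothetical.
 */
#if 0
static dma_addr_t my_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	if (dma_kmalloc_needs_bounce(dev, size, dir))
		return my_bounce_and_map(dev, page, offset, size, dir, attrs);

	return my_map_directly(dev, page, offset, size, dir, attrs);
}
#endif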

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
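
/*
 * Illustrative sketch (editor's example, kept under #if 0): dma_pgprot()
 * is what a ->mmap implementation uses to degrade the VMA protection for
 * non-coherent devices; dma_common_mmap() declared earlier in this header
 * takes the same approach.  The remap step is abbreviated.
 */
#if 0
static int my_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	/* ... remap_pfn_range() the backing pages into the VMA ... */
	return 0;
}
#endif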

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
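
/*
 * Illustrative sketch (editor's example, kept under #if 0): the canonical
 * pairing of the arch sync hooks around a streaming mapping on a
 * non-coherent device, in the style of dma-direct.  Address translation is
 * reduced to page_to_phys() for brevity.
 */
#if 0
static void my_sync_for_device(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(page_to_phys(page) + offset, size, dir);
}

static void my_sync_for_cpu(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(page_to_phys(page) + offset, size, dir);
}
#endif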

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
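
/*
 * Illustrative sketch (editor's example, kept under #if 0): bus firmware
 * code (OF/ACPI style) is the expected caller of these hooks when a device
 * is configured and later released.  The DMA range and coherency values
 * are placeholders.
 */
#if 0
static void my_bus_dma_configure(struct device *dev)
{
	arch_setup_dma_ops(dev, 0, 0x100000000ULL, NULL, false);
}

static void my_bus_dma_cleanup(struct device *dev)
{
	arch_teardown_dma_ops(dev);
}
#endif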

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

enum pci_p2pdma_map_type {
	/*
	 * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
	 * type hasn't been calculated yet. Functions that return this enum
	 * never return this value.
	 */
	PCI_P2PDMA_MAP_UNKNOWN = 0,

	/*
	 * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
	 * traverse the host bridge and the host bridge is not in the
	 * allowlist. DMA Mapping routines should return an error when
	 * this is returned.
	 */
	PCI_P2PDMA_MAP_NOT_SUPPORTED,

	/*
	 * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
	 * each other directly through a PCI switch and the transaction will
	 * not traverse the host bridge. Such a mapping should program
	 * the DMA engine with PCI bus addresses.
	 */
	PCI_P2PDMA_MAP_BUS_ADDR,

	/*
	 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
	 * to each other, but the transaction traverses a host bridge on the
	 * allowlist. In this case, a normal mapping either with CPU physical
	 * addresses (in the case of dma-direct) or IOVA addresses (in the
	 * case of IOMMUs) should be used to program the DMA engine.
	 */
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma_map_state {
	struct dev_pagemap *pgmap;
	int map;
	u64 bus_off;
};

#ifdef CONFIG_PCI_P2PDMA
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg);
#else /* CONFIG_PCI_P2PDMA */
static inline enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg)
{
	return PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
#endif /* CONFIG_PCI_P2PDMA */
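
/*
 * Illustrative sketch (editor's example, kept under #if 0): how a ->map_sg
 * implementation that sets DMA_F_PCI_P2PDMA_SUPPORTED might consume
 * pci_p2pdma_map_segment(), loosely following the dma-iommu code.  The
 * non-P2P mapping path is left out.
 */
#if 0
static int my_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!is_pci_p2pdma_page(sg_page(sg)))
			continue;	/* left to the normal mapping path */

		switch (pci_p2pdma_map_segment(&p2pdma_state, dev, sg)) {
		case PCI_P2PDMA_MAP_BUS_ADDR:
			/* sg_dma_address() now holds a PCI bus address */
			break;
		case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
			/* map this segment like ordinary host memory */
			break;
		default:
			return -EREMOTEIO;
		}
	}
	return nents;
}
#endif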

#endif /* _LINUX_DMA_MAP_OPS_H */