1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * This header is for implementations of dma_map_ops and related code. 4 * It should not be included in drivers just using the DMA API. 5 */ 6 #ifndef _LINUX_DMA_MAP_OPS_H 7 #define _LINUX_DMA_MAP_OPS_H 8 9 #include <linux/dma-mapping.h> 10 #include <linux/pgtable.h> 11 12 struct cma; 13 14 struct dma_map_ops { 15 void *(*alloc)(struct device *dev, size_t size, 16 dma_addr_t *dma_handle, gfp_t gfp, 17 unsigned long attrs); 18 void (*free)(struct device *dev, size_t size, void *vaddr, 19 dma_addr_t dma_handle, unsigned long attrs); 20 struct page *(*alloc_pages)(struct device *dev, size_t size, 21 dma_addr_t *dma_handle, enum dma_data_direction dir, 22 gfp_t gfp); 23 void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, 24 dma_addr_t dma_handle, enum dma_data_direction dir); 25 int (*mmap)(struct device *, struct vm_area_struct *, 26 void *, dma_addr_t, size_t, unsigned long attrs); 27 28 int (*get_sgtable)(struct device *dev, struct sg_table *sgt, 29 void *cpu_addr, dma_addr_t dma_addr, size_t size, 30 unsigned long attrs); 31 32 dma_addr_t (*map_page)(struct device *dev, struct page *page, 33 unsigned long offset, size_t size, 34 enum dma_data_direction dir, unsigned long attrs); 35 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, 36 size_t size, enum dma_data_direction dir, 37 unsigned long attrs); 38 /* 39 * map_sg returns 0 on error and a value > 0 on success. 40 * It should never return a value < 0. 41 */ 42 int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, 43 enum dma_data_direction dir, unsigned long attrs); 44 void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents, 45 enum dma_data_direction dir, unsigned long attrs); 46 dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr, 47 size_t size, enum dma_data_direction dir, 48 unsigned long attrs); 49 void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle, 50 size_t size, enum dma_data_direction dir, 51 unsigned long attrs); 52 void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle, 53 size_t size, enum dma_data_direction dir); 54 void (*sync_single_for_device)(struct device *dev, 55 dma_addr_t dma_handle, size_t size, 56 enum dma_data_direction dir); 57 void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, 58 int nents, enum dma_data_direction dir); 59 void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg, 60 int nents, enum dma_data_direction dir); 61 void (*cache_sync)(struct device *dev, void *vaddr, size_t size, 62 enum dma_data_direction direction); 63 int (*dma_supported)(struct device *dev, u64 mask); 64 u64 (*get_required_mask)(struct device *dev); 65 size_t (*max_mapping_size)(struct device *dev); 66 unsigned long (*get_merge_boundary)(struct device *dev); 67 }; 68 69 #ifdef CONFIG_DMA_OPS 70 #include <asm/dma-mapping.h> 71 72 static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 73 { 74 if (dev->dma_ops) 75 return dev->dma_ops; 76 return get_arch_dma_ops(dev->bus); 77 } 78 79 static inline void set_dma_ops(struct device *dev, 80 const struct dma_map_ops *dma_ops) 81 { 82 dev->dma_ops = dma_ops; 83 } 84 #else /* CONFIG_DMA_OPS */ 85 static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 86 { 87 return NULL; 88 } 89 static inline void set_dma_ops(struct device *dev, 90 const struct dma_map_ops *dma_ops) 91 { 92 } 93 #endif /* CONFIG_DMA_OPS */ 94 95 #ifdef CONFIG_DMA_CMA 96 extern struct cma *dma_contiguous_default_area; 97 98 static inline struct cma *dev_get_cma_area(struct device *dev) 99 { 100 if (dev && dev->cma_area) 101 return dev->cma_area; 102 return dma_contiguous_default_area; 103 } 104 105 void dma_contiguous_reserve(phys_addr_t addr_limit); 106 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, 107 phys_addr_t limit, struct cma **res_cma, bool fixed); 108 109 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, 110 unsigned int order, bool no_warn); 111 bool dma_release_from_contiguous(struct device *dev, struct page *pages, 112 int count); 113 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp); 114 void dma_free_contiguous(struct device *dev, struct page *page, size_t size); 115 116 void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size); 117 #else /* CONFIG_DMA_CMA */ 118 static inline struct cma *dev_get_cma_area(struct device *dev) 119 { 120 return NULL; 121 } 122 static inline void dma_contiguous_reserve(phys_addr_t limit) 123 { 124 } 125 static inline int dma_contiguous_reserve_area(phys_addr_t size, 126 phys_addr_t base, phys_addr_t limit, struct cma **res_cma, 127 bool fixed) 128 { 129 return -ENOSYS; 130 } 131 static inline struct page *dma_alloc_from_contiguous(struct device *dev, 132 size_t count, unsigned int order, bool no_warn) 133 { 134 return NULL; 135 } 136 static inline bool dma_release_from_contiguous(struct device *dev, 137 struct page *pages, int count) 138 { 139 return false; 140 } 141 /* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */ 142 static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size, 143 gfp_t gfp) 144 { 145 return NULL; 146 } 147 static inline void dma_free_contiguous(struct device *dev, struct page *page, 148 size_t size) 149 { 150 __free_pages(page, get_order(size)); 151 } 152 #endif /* CONFIG_DMA_CMA*/ 153 154 #ifdef CONFIG_DMA_PERNUMA_CMA 155 void dma_pernuma_cma_reserve(void); 156 #else 157 static inline void dma_pernuma_cma_reserve(void) { } 158 #endif /* CONFIG_DMA_PERNUMA_CMA */ 159 160 #ifdef CONFIG_DMA_DECLARE_COHERENT 161 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, 162 dma_addr_t device_addr, size_t size); 163 int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, 164 dma_addr_t *dma_handle, void **ret); 165 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); 166 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, 167 void *cpu_addr, size_t size, int *ret); 168 169 void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, 170 dma_addr_t *dma_handle); 171 int dma_release_from_global_coherent(int order, void *vaddr); 172 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, 173 size_t size, int *ret); 174 175 #else 176 static inline int dma_declare_coherent_memory(struct device *dev, 177 phys_addr_t phys_addr, dma_addr_t device_addr, size_t size) 178 { 179 return -ENOSYS; 180 } 181 #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0) 182 #define dma_release_from_dev_coherent(dev, order, vaddr) (0) 183 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) 184 185 static inline void *dma_alloc_from_global_coherent(struct device *dev, 186 ssize_t size, dma_addr_t *dma_handle) 187 { 188 return NULL; 189 } 190 static inline int dma_release_from_global_coherent(int order, void *vaddr) 191 { 192 return 0; 193 } 194 static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, 195 void *cpu_addr, size_t size, int *ret) 196 { 197 return 0; 198 } 199 #endif /* CONFIG_DMA_DECLARE_COHERENT */ 200 201 int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 202 void *cpu_addr, dma_addr_t dma_addr, size_t size, 203 unsigned long attrs); 204 int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 205 void *cpu_addr, dma_addr_t dma_addr, size_t size, 206 unsigned long attrs); 207 struct page *dma_common_alloc_pages(struct device *dev, size_t size, 208 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); 209 void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr, 210 dma_addr_t dma_handle, enum dma_data_direction dir); 211 212 struct page **dma_common_find_pages(void *cpu_addr); 213 void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot, 214 const void *caller); 215 void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot, 216 const void *caller); 217 void dma_common_free_remap(void *cpu_addr, size_t size); 218 219 struct page *dma_alloc_from_pool(struct device *dev, size_t size, 220 void **cpu_addr, gfp_t flags, 221 bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t)); 222 bool dma_free_from_pool(struct device *dev, void *start, size_t size); 223 224 int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, 225 dma_addr_t dma_start, u64 size); 226 227 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ 228 defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ 229 defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) 230 extern bool dma_default_coherent; 231 static inline bool dev_is_dma_coherent(struct device *dev) 232 { 233 return dev->dma_coherent; 234 } 235 #else 236 static inline bool dev_is_dma_coherent(struct device *dev) 237 { 238 return true; 239 } 240 #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */ 241 242 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 243 gfp_t gfp, unsigned long attrs); 244 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, 245 dma_addr_t dma_addr, unsigned long attrs); 246 247 #ifdef CONFIG_MMU 248 /* 249 * Page protection so that devices that can't snoop CPU caches can use the 250 * memory coherently. We default to pgprot_noncached which is usually used 251 * for ioremap as a safe bet, but architectures can override this with less 252 * strict semantics if possible. 253 */ 254 #ifndef pgprot_dmacoherent 255 #define pgprot_dmacoherent(prot) pgprot_noncached(prot) 256 #endif 257 258 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); 259 #else 260 static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, 261 unsigned long attrs) 262 { 263 return prot; /* no protection bits supported without page tables */ 264 } 265 #endif /* CONFIG_MMU */ 266 267 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE 268 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, 269 enum dma_data_direction dir); 270 #else 271 static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, 272 enum dma_data_direction dir) 273 { 274 } 275 #endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */ 276 277 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU 278 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, 279 enum dma_data_direction dir); 280 #else 281 static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, 282 enum dma_data_direction dir) 283 { 284 } 285 #endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */ 286 287 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL 288 void arch_sync_dma_for_cpu_all(void); 289 #else 290 static inline void arch_sync_dma_for_cpu_all(void) 291 { 292 } 293 #endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */ 294 295 #ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT 296 void arch_dma_prep_coherent(struct page *page, size_t size); 297 #else 298 static inline void arch_dma_prep_coherent(struct page *page, size_t size) 299 { 300 } 301 #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */ 302 303 #ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN 304 void arch_dma_mark_clean(phys_addr_t paddr, size_t size); 305 #else 306 static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size) 307 { 308 } 309 #endif /* ARCH_HAS_DMA_MARK_CLEAN */ 310 311 void *arch_dma_set_uncached(void *addr, size_t size); 312 void arch_dma_clear_uncached(void *addr, size_t size); 313 314 #ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT 315 bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr); 316 bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle); 317 bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg, 318 int nents); 319 bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg, 320 int nents); 321 #else 322 #define arch_dma_map_page_direct(d, a) (false) 323 #define arch_dma_unmap_page_direct(d, a) (false) 324 #define arch_dma_map_sg_direct(d, s, n) (false) 325 #define arch_dma_unmap_sg_direct(d, s, n) (false) 326 #endif 327 328 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS 329 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 330 const struct iommu_ops *iommu, bool coherent); 331 #else 332 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, 333 u64 size, const struct iommu_ops *iommu, bool coherent) 334 { 335 } 336 #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */ 337 338 #ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS 339 void arch_teardown_dma_ops(struct device *dev); 340 #else 341 static inline void arch_teardown_dma_ops(struct device *dev) 342 { 343 } 344 #endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */ 345 346 #ifdef CONFIG_DMA_API_DEBUG 347 void dma_debug_add_bus(struct bus_type *bus); 348 void debug_dma_dump_mappings(struct device *dev); 349 #else 350 static inline void dma_debug_add_bus(struct bus_type *bus) 351 { 352 } 353 static inline void debug_dma_dump_mappings(struct device *dev) 354 { 355 } 356 #endif /* CONFIG_DMA_API_DEBUG */ 357 358 extern const struct dma_map_ops dma_dummy_ops; 359 360 #endif /* _LINUX_DMA_MAP_OPS_H */ 361