Lines matching references to dev (kernel DMA direct-mapping code)
25 static inline dma_addr_t phys_to_dma_direct(struct device *dev, in phys_to_dma_direct() argument
28 if (force_dma_unencrypted(dev)) in phys_to_dma_direct()
29 return phys_to_dma_unencrypted(dev, phys); in phys_to_dma_direct()
30 return phys_to_dma(dev, phys); in phys_to_dma_direct()
33 static inline struct page *dma_direct_to_page(struct device *dev, in dma_direct_to_page() argument
36 return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr))); in dma_direct_to_page()
39 u64 dma_direct_get_required_mask(struct device *dev) in dma_direct_get_required_mask() argument
42 u64 max_dma = phys_to_dma_direct(dev, phys); in dma_direct_get_required_mask()
47 static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit) in dma_direct_optimal_gfp_mask() argument
50 dev->coherent_dma_mask, in dma_direct_optimal_gfp_mask()
51 dev->bus_dma_limit); in dma_direct_optimal_gfp_mask()
61 *phys_limit = dma_to_phys(dev, dma_limit); in dma_direct_optimal_gfp_mask()
69 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) in dma_coherent_ok() argument
71 dma_addr_t dma_addr = phys_to_dma_direct(dev, phys); in dma_coherent_ok()
76 min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); in dma_coherent_ok()
79 static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size) in dma_set_decrypted() argument
81 if (!force_dma_unencrypted(dev)) in dma_set_decrypted()
86 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size) in dma_set_encrypted() argument
90 if (!force_dma_unencrypted(dev)) in dma_set_encrypted()
98 static void __dma_direct_free_pages(struct device *dev, struct page *page, in __dma_direct_free_pages() argument
101 if (swiotlb_free(dev, page, size)) in __dma_direct_free_pages()
103 dma_free_contiguous(dev, page, size); in __dma_direct_free_pages()
106 static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size) in dma_direct_alloc_swiotlb() argument
108 struct page *page = swiotlb_alloc(dev, size); in dma_direct_alloc_swiotlb()
110 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in dma_direct_alloc_swiotlb()
111 swiotlb_free(dev, page, size); in dma_direct_alloc_swiotlb()
118 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, in __dma_direct_alloc_pages() argument
121 int node = dev_to_node(dev); in __dma_direct_alloc_pages()
127 if (is_swiotlb_for_alloc(dev)) in __dma_direct_alloc_pages()
128 return dma_direct_alloc_swiotlb(dev, size); in __dma_direct_alloc_pages()
130 gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit); in __dma_direct_alloc_pages()
131 page = dma_alloc_contiguous(dev, size, gfp); in __dma_direct_alloc_pages()
133 if (!dma_coherent_ok(dev, page_to_phys(page), size) || in __dma_direct_alloc_pages()
135 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
142 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
143 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
166 static bool dma_direct_use_pool(struct device *dev, gfp_t gfp) in dma_direct_use_pool() argument
168 return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev); in dma_direct_use_pool()
171 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size, in dma_direct_alloc_from_pool() argument
181 gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit); in dma_direct_alloc_from_pool()
182 page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok); in dma_direct_alloc_from_pool()
185 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_from_pool()
189 static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size, in dma_direct_alloc_no_mapping() argument
194 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); in dma_direct_alloc_no_mapping()
203 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_no_mapping()
207 void *dma_direct_alloc(struct device *dev, size_t size, in dma_direct_alloc() argument
219 !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) in dma_direct_alloc()
220 return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp); in dma_direct_alloc()
222 if (!dev_is_dma_coherent(dev)) { in dma_direct_alloc()
230 !is_swiotlb_for_alloc(dev)) in dma_direct_alloc()
231 return arch_dma_alloc(dev, size, dma_handle, gfp, in dma_direct_alloc()
239 return dma_alloc_from_global_coherent(dev, size, in dma_direct_alloc()
249 if (dma_direct_use_pool(dev, gfp)) in dma_direct_alloc()
250 return dma_direct_alloc_from_pool(dev, size, in dma_direct_alloc()
263 if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp)) in dma_direct_alloc()
264 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc()
267 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); in dma_direct_alloc()
282 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); in dma_direct_alloc()
284 if (force_dma_unencrypted(dev)) in dma_direct_alloc()
297 if (dma_set_decrypted(dev, ret, size)) in dma_direct_alloc()
310 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc()
314 if (dma_set_encrypted(dev, page_address(page), size)) in dma_direct_alloc()
317 __dma_direct_free_pages(dev, page, size); in dma_direct_alloc()
323 void dma_direct_free(struct device *dev, size_t size, in dma_direct_free() argument
329 !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) { in dma_direct_free()
331 dma_free_contiguous(dev, cpu_addr, size); in dma_direct_free()
338 !dev_is_dma_coherent(dev) && in dma_direct_free()
339 !is_swiotlb_for_alloc(dev)) { in dma_direct_free()
340 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); in dma_direct_free()
345 !dev_is_dma_coherent(dev)) { in dma_direct_free()
353 dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) in dma_direct_free()
361 if (dma_set_encrypted(dev, cpu_addr, size)) in dma_direct_free()
365 __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size); in dma_direct_free()
368 struct page *dma_direct_alloc_pages(struct device *dev, size_t size, in dma_direct_alloc_pages() argument
374 if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp)) in dma_direct_alloc_pages()
375 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc_pages()
377 page = __dma_direct_alloc_pages(dev, size, gfp, false); in dma_direct_alloc_pages()
382 if (dma_set_decrypted(dev, ret, size)) in dma_direct_alloc_pages()
385 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_pages()
391 void dma_direct_free_pages(struct device *dev, size_t size, in dma_direct_free_pages() argument
399 dma_free_from_pool(dev, vaddr, size)) in dma_direct_free_pages()
402 if (dma_set_encrypted(dev, vaddr, size)) in dma_direct_free_pages()
404 __dma_direct_free_pages(dev, page, size); in dma_direct_free_pages()
409 void dma_direct_sync_sg_for_device(struct device *dev, in dma_direct_sync_sg_for_device() argument
416 phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); in dma_direct_sync_sg_for_device()
418 if (unlikely(is_swiotlb_buffer(dev, paddr))) in dma_direct_sync_sg_for_device()
419 swiotlb_sync_single_for_device(dev, paddr, sg->length, in dma_direct_sync_sg_for_device()
422 if (!dev_is_dma_coherent(dev)) in dma_direct_sync_sg_for_device()
432 void dma_direct_sync_sg_for_cpu(struct device *dev, in dma_direct_sync_sg_for_cpu() argument
439 phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); in dma_direct_sync_sg_for_cpu()
441 if (!dev_is_dma_coherent(dev)) in dma_direct_sync_sg_for_cpu()
444 if (unlikely(is_swiotlb_buffer(dev, paddr))) in dma_direct_sync_sg_for_cpu()
445 swiotlb_sync_single_for_cpu(dev, paddr, sg->length, in dma_direct_sync_sg_for_cpu()
452 if (!dev_is_dma_coherent(dev)) in dma_direct_sync_sg_for_cpu()
460 void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, in dma_direct_unmap_sg() argument
470 dma_direct_unmap_page(dev, sg->dma_address, in dma_direct_unmap_sg()
476 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, in dma_direct_map_sg() argument
486 map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg); in dma_direct_map_sg()
504 sg->dma_address = dma_direct_map_page(dev, sg_page(sg), in dma_direct_map_sg()
516 dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); in dma_direct_map_sg()
520 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, in dma_direct_map_resource() argument
525 if (unlikely(!dma_capable(dev, dma_addr, size, false))) { in dma_direct_map_resource()
526 dev_err_once(dev, in dma_direct_map_resource()
528 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in dma_direct_map_resource()
536 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, in dma_direct_get_sgtable() argument
540 struct page *page = dma_direct_to_page(dev, dma_addr); in dma_direct_get_sgtable()
549 bool dma_direct_can_mmap(struct device *dev) in dma_direct_can_mmap() argument
551 return dev_is_dma_coherent(dev) || in dma_direct_can_mmap()
555 int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, in dma_direct_mmap() argument
561 unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr)); in dma_direct_mmap()
564 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in dma_direct_mmap()
565 if (force_dma_unencrypted(dev)) in dma_direct_mmap()
568 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_direct_mmap()
579 int dma_direct_supported(struct device *dev, u64 mask) in dma_direct_supported() argument
599 return mask >= phys_to_dma_unencrypted(dev, min_mask); in dma_direct_supported()
602 size_t dma_direct_max_mapping_size(struct device *dev) in dma_direct_max_mapping_size() argument
605 if (is_swiotlb_active(dev) && in dma_direct_max_mapping_size()
606 (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev))) in dma_direct_max_mapping_size()
607 return swiotlb_max_mapping_size(dev); in dma_direct_max_mapping_size()
611 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr) in dma_direct_need_sync() argument
613 return !dev_is_dma_coherent(dev) || in dma_direct_need_sync()
614 is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr)); in dma_direct_need_sync()
633 int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, in dma_direct_set_offset() argument
639 if (dev->dma_range_map) { in dma_direct_set_offset()
640 dev_err(dev, "attempt to add DMA range to existing map\n"); in dma_direct_set_offset()
654 dev->dma_range_map = map; in dma_direct_set_offset()
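
For context, a minimal sketch of how a driver typically reaches these dma_direct_* paths: on a device that uses dma-direct (for example, no IOMMU in the way), dma_alloc_coherent() and dma_free_coherent() end up in dma_direct_alloc() and dma_direct_free() from the listing above. The function below is an illustrative, hypothetical example, not part of the listing; the buffer size and GFP_KERNEL flag are assumptions.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/*
 * Illustrative sketch only: allocate and free one coherent DMA buffer.
 * With dma-direct in use, dma_alloc_coherent() resolves to
 * dma_direct_alloc() and dma_free_coherent() to dma_direct_free().
 */
static int example_coherent_alloc(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* 4 KiB buffer; size and GFP_KERNEL are assumed for the example */
	cpu_addr = dma_alloc_coherent(dev, 4096, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, touch cpu_addr from the CPU ... */

	dma_free_coherent(dev, 4096, cpu_addr, dma_handle);
	return 0;
}

The streaming side works the same way: dma_map_sg() and dma_unmap_sg() on such a device reach dma_direct_map_sg() and dma_direct_unmap_sg() shown in the listing.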