// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;
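
/*
 * Illustrative only (not part of this file's logic): an architecture whose
 * ZONE_DMA covers, say, the low 1 GiB would set "zone_dma_bits = 30;" from
 * its early init code before any dma-direct allocations happen.
 */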

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

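	/*
	 * Return a mask with every bit up to and including the highest bit
	 * set in max_dma.  Worked example (illustrative): a max_dma of
	 * 0x87654321 has fls64() == 32, so the required mask works out to
	 * (1ULL << 31) * 2 - 1 == 0xffffffff.
	 */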
	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(
			dev->coherent_dma_mask,
			dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}
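
/*
 * Worked example (illustrative): a device with a 30-bit coherent DMA mask
 * and no bus_dma_limit gets dma_limit = DMA_BIT_MASK(30).  Assuming an
 * identity dma_to_phys() translation, that phys_limit lies above the
 * default DMA_BIT_MASK(24) but below DMA_BIT_MASK(32), so the helper above
 * returns GFP_DMA32; callers only fall back further if the memory they get
 * turns out not to be addressable.
 */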

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
		size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}
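
/*
 * For example (illustrative): a dma_alloc_coherent() call made with
 * GFP_ATOMIC cannot block, so the callers below use this helper to decide
 * whether the buffer must instead come from the pre-populated atomic pools.
 */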

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_limit;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}
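
/*
 * Sketch of how a caller reaches the helper above (illustrative, not taken
 * from this file): a driver that never touches the buffer from the CPU can
 * pass DMA_ATTR_NO_KERNEL_MAPPING to dma_alloc_attrs() and must then treat
 * the returned pointer purely as an opaque cookie to hand back to
 * dma_free_attrs(), never as a kernel virtual address.
 */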

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		/*
		 * Fallback to the arch handler if it exists.  This should
		 * eventually go away.
		 */
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise remap if the architecture is asking for it.  But
		 * given that remapping memory is a blocking operation we'll
		 * instead have to dip into the atomic pools.
		 */
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (remap) {
			if (dma_direct_use_pool(dev, gfp))
				return dma_direct_alloc_from_pool(dev, size,
						dma_handle, gfp);
		} else {
			if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
				return NULL;
			set_uncached = true;
		}
	}

	/*
	 * Decrypting memory may block, so allocate the memory from the atomic
	 * pools if we can't block.
	 */
	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup.  These need
	 * to be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_leak_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
out_leak_pages:
	return NULL;
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_leak_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_leak_pages:
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_device(dev, paddr, sg->length,
					dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
					dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_dma_is_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					sg_dma_len(sg), dir, attrs);
	}
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * address and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev: device pointer; needed to "own" the allocated memory.
 * @cpu_start: beginning of memory region covered by this offset.
 * @dma_start: beginning of DMA/PCI region covered by this offset.
 * @size: size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
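
/*
 * Illustrative call from platform or arch setup code (values made up):
 *
 *	dma_direct_set_offset(dev, 0x80000000, 0x00000000, SZ_1G);
 *
 * which makes CPU physical addresses 0x80000000..0xbfffffff appear to the
 * device as bus addresses 0x00000000..0x3fffffff.
 */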