// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
	void		*virt_base;	/* kernel mapping of the pool (memremap'd) */
	dma_addr_t	device_base;	/* base address as seen by the device */
	unsigned long	pfn_base;	/* first CPU PFN of the pool */
	int		size;		/* pool size in pages */
	unsigned long	*bitmap;	/* one bit per page, set when allocated */
	spinlock_t	spinlock;	/* protects the allocation bitmap */
	bool		use_dev_dma_pfn_offset;	/* translate via phys_to_dma() */
};

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
	return mem->device_base;
}

static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
{
	struct dma_coherent_mem *dma_mem;
	int pages = size >> PAGE_SHIFT;
	void *mem_base;

	if (!size)
		return ERR_PTR(-EINVAL);

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base)
		return ERR_PTR(-EINVAL);

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out_unmap_membase;
	dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out_free_dma_mem;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
	spin_lock_init(&dma_mem->spinlock);

	return dma_mem;

out_free_dma_mem:
	kfree(dma_mem);
out_unmap_membase:
	memunmap(mem_base);
	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
	       &phys_addr, size / SZ_1M);
	return ERR_PTR(-ENOMEM);
}

static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	bitmap_free(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device.  This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be memremapped write-combined so the CPU can access
 * the region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		_dma_release_coherent_memory(mem);
	return ret;
}
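
/*
 * Usage sketch (illustrative only, not part of this file): a platform
 * driver's probe routine could dedicate a block of on-chip SRAM to
 * coherent allocations for its device.  The function name, addresses,
 * and size below are assumptions made up for the example.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = dma_declare_coherent_memory(&pdev->dev,
 *				0x90000000,	(CPU physical address)
 *				0x00000000,	(address as seen by the device)
 *				SZ_1M);		(multiple of PAGE_SIZE)
 *		if (ret)
 *			return ret;
 *		...
 *	}
 *
 * Later dma_alloc_coherent(&pdev->dev, ...) calls are then satisfied from
 * this pool before any generic allocator is consulted.
 */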

void dma_release_coherent_memory(struct device *dev)
{
	if (dev) {
		_dma_release_coherent_memory(dev->dma_mem);
		dev->dma_mem = NULL;
	}
}

static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address
 *	 of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
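
/*
 * Calling-convention sketch (hedged, not copied from any architecture):
 * an arch dma_alloc_coherent() backend tries the per-device pool first and
 * only falls back to its generic path when no pool is declared for @dev.
 * alloc_from_generic_areas() is a hypothetical stand-in for that path.
 *
 *	void *arch_dma_alloc(struct device *dev, size_t size,
 *			     dma_addr_t *dma_handle, gfp_t gfp)
 *	{
 *		void *vaddr;
 *
 *		if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *			return vaddr;	(NULL here means the pool is exhausted)
 *		return alloc_from_generic_areas(dev, size, dma_handle, gfp);
 *	}
 */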

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
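
/*
 * The free side mirrors the allocation hook above (again a hedged sketch;
 * free_to_generic_areas() is a hypothetical stand-in for the arch's
 * generic release path):
 *
 *	void arch_dma_free(struct device *dev, size_t size, void *vaddr)
 *	{
 *		if (dma_release_from_dev_coherent(dev, get_order(size), vaddr))
 *			return;
 *		free_to_generic_areas(dev, size, vaddr);
 *	}
 */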

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev: device from which the memory was allocated
 * @vma: vm_area for the userspace memory
 * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
 * @size: size of the memory buffer allocated
 * @ret: result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
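
/*
 * The mmap path follows the same pattern as the alloc and free hooks
 * (hedged sketch; mmap_from_generic_areas() is a hypothetical fallback):
 *
 *	int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *			  void *vaddr, size_t size)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_dev_coherent(dev, vma, vaddr, size, &ret))
 *			return ret;
 *		return mmap_from_generic_areas(dev, vma, vaddr, size);
 *	}
 */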

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
				     dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
					 dma_handle);
}

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
{
	struct dma_coherent_mem *mem;

	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
	dma_coherent_default_memory = mem;
	pr_info("DMA: default coherent area is set\n");
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct reserved_mem *dma_reserved_default_memory __initdata;
#endif

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	if (!rmem->priv) {
		struct dma_coherent_mem *mem;

		mem = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, true);
		if (IS_ERR(mem))
			return PTR_ERR(mem);
		rmem->priv = mem;
	}
	dma_assign_coherent_memory(dev, rmem->priv);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

#ifdef CONFIG_DMA_GLOBAL_POOL
	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
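
/*
 * Example device tree fragment that rmem_dma_setup() accepts (node names
 * and addresses are illustrative assumptions, not from a real platform):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@90000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x90000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 * Adding "linux,dma-default" makes the region the global pool picked up by
 * dma_init_reserved_memory() below; a "reusable" property is rejected here
 * because reusable shared-dma-pool regions are handled by CMA instead.
 */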

#ifdef CONFIG_DMA_GLOBAL_POOL
static int __init dma_init_reserved_memory(void)
{
	if (!dma_reserved_default_memory)
		return -ENOMEM;
	return dma_init_global_coherent(dma_reserved_default_memory->base,
					dma_reserved_default_memory->size);
}
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif