// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

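/*
 * One instance per registered CMA area: ties the dma-heap core object to
 * the CMA region that backs its allocations.
 */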
struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

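/*
 * Per-allocation state: the contiguous CMA block (cma_pages), a struct
 * page array view of it (used to build sg-tables, vmap() ranges and
 * mmap() faults), the list of device attachments, and a refcounted
 * kernel mapping.
 */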
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

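/*
 * Per-device attachment state: a private sg_table plus a flag telling
 * the CPU-access hooks whether that table is currently DMA-mapped and
 * therefore needs cache maintenance.
 */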
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

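/*
 * Give each attaching device its own sg_table over the buffer's pages
 * and link it into the buffer's attachment list so the CPU-access hooks
 * can reach every device.
 */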
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

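/* Tear down an attachment: unlink it and free its private sg_table. */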
static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

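/*
 * DMA-map the attachment's sg_table and mark it mapped so the
 * begin/end_cpu_access hooks know this device now needs syncing.
 */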
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

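/* Undo cma_heap_map_dma_buf(): clear the mapped flag and unmap the table. */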
static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

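/*
 * Prepare for CPU access: invalidate any kernel vmap range and sync
 * every currently DMA-mapped attachment back to the CPU.
 */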
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

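/*
 * Finish CPU access: flush any kernel vmap range and hand every mapped
 * attachment back to its device.
 */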
static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

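/* Fault in one page of the buffer; offsets past the end get SIGBUS. */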
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

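/*
 * mmap() hook: reject non-shared mappings and wire up the fault handler
 * so pages are mapped into userspace lazily.
 */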
static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

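/* Map the buffer's page array into a contiguous kernel virtual range. */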
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

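/*
 * Refcounted kernel mapping: reuse the existing vmap when one is live,
 * otherwise create it, all under the buffer lock.
 */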
static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

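/* Drop a vmap reference; tear the mapping down when the count hits zero. */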
static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

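/*
 * Final release of the dma-buf: warn about (and undo) a leaked kernel
 * mapping, then free the page array and return the block to CMA.
 */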
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

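/*
 * Allocate a physically contiguous, zeroed buffer from the heap's CMA
 * area, build the page array used by the ops above, and export it as a
 * new dma-buf.
 */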
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

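	/* Cap the alignment order at the most the CMA allocator supports */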
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

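/* Register one CMA area as a dma-heap, named after the CMA region. */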
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

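/* At module init, export only the system's default CMA area (if any). */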
static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");