// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

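/*
 * Userspace obtains buffers from this heap through the DMA-BUF heaps
 * ioctl interface on /dev/dma_heap/system. A minimal allocation sketch
 * (illustrative only, not part of this driver; assumes the uapi header
 * <linux/dma-heap.h>):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-heap.h>
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/dma_heap/system", O_RDONLY);
 *	int ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *
 * On success (ret == 0), data.fd holds a dma-buf file descriptor backed
 * by this heap, which can be mmap()ed or shared with devices like any
 * other dma-buf.
 */
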
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;

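/*
 * Per-buffer state: the owning heap, the list of device attachments
 * (protected by @lock), the buffer length, the pages backing the buffer
 * described by @sg_table, and a refcounted kernel mapping tracked by
 * @vmap_cnt / @vaddr.
 */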
struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

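/*
 * Per-attachment state: a private copy of the buffer's scatterlist for
 * this device and whether it is currently DMA-mapped, which determines
 * which attachments need cache maintenance in begin/end_cpu_access.
 */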
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};

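/*
 * High-order allocations are opportunistic: they skip direct reclaim and
 * retries, and stay quiet on failure, so that the allocation loop falls
 * back quickly to the next smaller order instead of stalling.
 */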
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

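/*
 * Duplicate the buffer's scatterlist so each attachment gets its own
 * table referencing the same pages; dma_map_sgtable() can then store
 * per-device DMA addresses without disturbing other attachments.
 */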
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

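/*
 * On attach, give the device a private copy of the scatterlist and add
 * it to the buffer's attachment list so that CPU-access cache
 * maintenance can reach every mapped device.
 */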
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

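/*
 * Before CPU access, invalidate any kernel vmap range and sync every
 * currently DMA-mapped attachment back for the CPU; end_cpu_access
 * below does the mirror-image flush toward the devices.
 */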
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

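/*
 * Map the buffer into userspace page by page with remap_pfn_range(),
 * starting at the page indicated by vma->vm_pgoff and stopping once the
 * VMA is fully covered.
 */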
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

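/*
 * Build a contiguous kernel mapping of the buffer: collect every backing
 * page from the sg_table into a temporary array and hand it to vmap().
 */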
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		dma_buf_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	dma_buf_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	dma_buf_map_clear(map);
}

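/*
 * Last dma-buf reference dropped: return every page to the allocator at
 * its compound order (the pages were allocated with __GFP_COMP) and free
 * the bookkeeping.
 */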
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

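/*
 * Try the largest order that both fits in the remaining size and does
 * not exceed max_order, falling back to smaller orders on failure.
 */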
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size <  (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

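/*
 * Allocate the buffer as a list of largest-available chunks. Once a
 * high order fails, max_order drops to the order that did succeed, so
 * larger orders are never retried. The chunks are then threaded into
 * the buffer's sg_table and the whole thing is exported as a dma-buf.
 */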
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");