// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

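/*
 * Reserve a node in the driver's I/O virtual address range and map the
 * object's backing pages into the IOMMU domain at that address. On success,
 * rk_obj->dma_addr holds the device-visible address and rk_obj->size the
 * actually mapped length.
 */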
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zu\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

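/*
 * Tear down the mapping created by rockchip_gem_iommu_map() and return the
 * reserved range to the DRM MM allocator.
 */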
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

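/*
 * Back the object with shmem pages, build a scatter/gather table over them
 * and flush the CPU caches so the device observes coherent contents.
 */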
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

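/* Undo rockchip_gem_get_pages(): free the sg table, then drop the pages. */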
static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

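/*
 * Allocation path used when an IOMMU domain is available: the buffer may be
 * physically discontiguous, and a kernel virtual mapping is only created
 * when the caller asks for one via alloc_kmap.
 */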
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

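/*
 * Fallback allocation path without an IOMMU: grab a physically contiguous
 * buffer from the DMA API. DMA_ATTR_NO_KERNEL_MAPPING avoids setting up a
 * kernel mapping the caller does not need.
 */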
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n",
			  obj->size);
		return -ENOMEM;
	}

	return 0;
}

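/* Pick the allocation path based on whether an IOMMU domain was set up. */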
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

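/*
 * Map the individual backing pages of an IOMMU-backed object into the
 * user's VMA; the DMA variant below delegates to dma_mmap_attrs() instead.
 */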
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

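/*
 * Common tail of both mmap entry points: fix up the VMA and hand off to the
 * IOMMU or DMA path depending on how the buffer was allocated.
 */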
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

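/*
 * mmap an object the caller already holds a reference to, e.g. for PRIME;
 * the file-based entry point below serves userspace mmap() on the DRM fd.
 */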
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

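/* Release the core GEM state and free the wrapper object itself. */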
static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

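/* GEM object callbacks, common to both allocation paths. */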
static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
	.free = rockchip_gem_free_object,
	.get_sg_table = rockchip_gem_prime_get_sg_table,
	.vmap = rockchip_gem_prime_vmap,
	.vunmap = rockchip_gem_prime_vunmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};

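/*
 * Create the rockchip_gem_object wrapper and initialise its GEM core state;
 * backing storage is attached separately by rockchip_gem_alloc_buf().
 */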
static struct rockchip_gem_object *
	rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	obj->funcs = &rockchip_gem_object_funcs;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

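/*
 * Allocate a fully backed GEM object. A caller that needs CPU access, e.g.
 * the fbdev emulation, would pass alloc_kmap = true and then use the kernel
 * mapping (a sketch, not a snippet from this driver):
 *
 *	rk_obj = rockchip_gem_create_object(drm, size, true);
 *	if (IS_ERR(rk_obj))
 *		return PTR_ERR(rk_obj);
 *	memset(rk_obj->kvaddr, 0, size);
 */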
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
				     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID in the IDR table, where the object is registered;
	 * the handle carries the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* Drop the reference from allocation; the handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/* Align to 64 bytes since Mali requires it. */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages,
					     rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

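/*
 * Length of the run of DMA-contiguous segments at the start of the sg
 * table; used below to check that an imported buffer can be addressed
 * without an IOMMU.
 */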
static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
						     int count)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, count, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

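/*
 * The two import helpers below attach a dma-buf's sg table to the object:
 * through the IOMMU when a domain exists, otherwise by DMA-mapping the
 * table and requiring the result to be contiguous.
 */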
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
			       DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
			     DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

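/*
 * (struct drm_driver)->gem_prime_import_sg_table callback: wrap an imported
 * dma-buf's sg table in a rockchip GEM object.
 */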
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

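/*
 * (struct drm_gem_object_funcs)->vmap callback. IOMMU-backed objects get a
 * fresh write-combined mapping; DMA-allocated objects reuse the address from
 * dma_alloc_attrs(), which does not exist if DMA_ATTR_NO_KERNEL_MAPPING was
 * set.
 */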
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}