// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

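/*
 * Reserve an I/O virtual address range from the driver-wide drm_mm
 * allocator and map the object's scatter/gather table into the shared
 * IOMMU domain at that address.
 */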
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

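	/*
	 * iommu_map_sgtable() returns the number of bytes it mapped;
	 * anything short of the full object size is treated as failure.
	 */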
	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
				prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zu\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

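/*
 * Tear down the IOMMU mapping and return the I/O virtual address range
 * to the driver-wide drm_mm allocator.
 */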
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

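/*
 * Back the object with shmem pages, build a scatter/gather table for
 * them and flush the CPU caches so the device sees coherent contents.
 */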
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sgtable_sg(rk_obj->sgt, s, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

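/*
 * Allocate a buffer that lives behind the IOMMU: discontiguous shmem
 * pages mapped into the IOMMU domain, plus an optional write-combined
 * kernel mapping.
 */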
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

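/*
 * Without an IOMMU the buffer must be physically contiguous, so
 * allocate it through the DMA API (typically backed by CMA).
 */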
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n", obj->size);
		return -ENOMEM;
	}

	return 0;
}

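/* Choose the backing store based on whether an IOMMU domain is available. */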
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

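/* rk_obj->pages is only set on the IOMMU path, so use it to pick the teardown. */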
static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

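/*
 * Map the shmem pages straight into userspace; behind an IOMMU they do
 * not need to be physically contiguous.
 */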
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_flags &= ~VM_PFNMAP;

	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

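/* GEM object callbacks shared by the IOMMU and DMA backing stores. */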
static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
	.free = rockchip_gem_free_object,
	.get_sg_table = rockchip_gem_prime_get_sg_table,
	.vmap = rockchip_gem_prime_vmap,
	.vunmap	= rockchip_gem_prime_vunmap,
	.mmap = rockchip_drm_gem_object_mmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};

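/*
 * Create and initialize the GEM object itself; the backing storage is
 * allocated separately by rockchip_gem_alloc_buf().
 */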
static struct rockchip_gem_object *
	rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	obj->funcs = &rockchip_gem_object_funcs;

	ret = drm_gem_object_init(drm, obj, size);
	if (ret) {
		kfree(rk_obj);
		return ERR_PTR(ret);
	}

	return rk_obj;
}

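/*
 * Allocate a GEM object together with its backing storage; with
 * alloc_kmap the buffer is also mapped into the kernel, write-combined.
 */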
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
					  DMA_BIDIRECTIONAL, 0);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct rockchip_gem_object * on success or an ERR_PTR()
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID in the file's handle (IDR) table for the object;
	 * the returned handle is the ID userspace uses to refer to it.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this in your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align the pitch to 64 bytes, since the Mali GPU requires it.
	 */
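	/*
	 * e.g. a 1920x1080 XRGB8888 (32 bpp) buffer gets min_pitch =
	 * 1920 * 32 / 8 = 7680 bytes, which is already 64-byte aligned,
	 * so args->size = 7680 * 1080 = 8294400 bytes.
	 */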
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

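/*
 * Attach an imported scatter/gather table by mapping it through the
 * shared IOMMU domain, just like natively allocated pages.
 */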
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

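/*
 * Without an IOMMU the device needs the imported buffer to be
 * contiguous in DMA address space, so reject anything that is not.
 */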
static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);

	if (err)
		return err;

	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

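/*
 * Import a dma-buf: wrap the exporter's scatter/gather table in a GEM
 * object and map it through whichever path the hardware supports.
 */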
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

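/*
 * For shmem-backed objects, create a fresh write-combined vmap(); DMA
 * allocations reuse the kernel address obtained at allocation time,
 * which does not exist if DMA_ATTR_NO_KERNEL_MAPPING was set.
 */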
int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				   pgprot_writecombine(PAGE_KERNEL));

		if (!vaddr)
			return -ENOMEM;
		dma_buf_map_set_vaddr(map, vaddr);
		return 0;
	}

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return -ENOMEM;
	dma_buf_map_set_vaddr(map, rk_obj->kvaddr);

	return 0;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(map->vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}