// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

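/*
 * Reserve an IOVA range for the buffer from the driver-private drm_mm
 * allocator, then map its sg table into the shared IOMMU domain. On
 * success, rk_obj->dma_addr holds the device address of the mapping.
 */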
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zu\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

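/*
 * Back the object with pages from shmem and build an sg table for them.
 * The pages come straight from the page allocator, so they have to be
 * flushed to the point of coherency before a device may scan them out.
 */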
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

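/*
 * IOMMU-backed allocation: discontiguous shmem pages are made contiguous
 * in device address space by rockchip_gem_iommu_map(). An optional
 * write-combined kernel mapping is set up with vmap() for callers that
 * need CPU access to the buffer (the fbdev emulation, for instance).
 */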
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

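/*
 * Non-IOMMU fallback: allocate a physically contiguous buffer through
 * the DMA API (typically CMA). DMA_ATTR_NO_KERNEL_MAPPING avoids
 * creating a kernel mapping when no CPU access is requested.
 */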
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n", obj->size);
		return -ENOMEM;
	}

	return 0;
}

static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

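/*
 * With an IOMMU the buffer is backed by individual struct pages, so the
 * whole object can be inserted into userspace with vm_map_pages(). A
 * non-NULL rk_obj->pages is what distinguishes the two mapping paths
 * below.
 */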
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a table of struct pages for rk_obj, so clear the
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

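/*
 * Map a GEM object into a vma that was not set up through the DRM file's
 * mmap path; the fbdev emulation, for instance, uses this to mmap its
 * framebuffer object.
 */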
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
				     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle for it
 *
 * Returns a struct rockchip_gem_object * on success or an ERR_PTR()
 * value on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an id from the idr table where the obj is registered;
	 * the handle holds the id that userspace uses to refer to the
	 * object.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this in your own function if you need bigger alignment.
 */
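/*
 * Example with illustrative values: a 1920x1080 XRGB8888 (32 bpp) dumb
 * buffer has min_pitch = 1920 * 32 / 8 = 7680 bytes, which is already
 * 64-byte aligned, so args->pitch = 7680 and args->size = 7680 * 1080 =
 * 8294400 bytes; rockchip_gem_alloc_object() later rounds the size up
 * to a whole number of pages.
 */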
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents and the sg_table itself must be freed
 *       by the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages,
					     rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

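/*
 * Walk a mapped sg list and count how many bytes from the start are
 * contiguous in DMA address space, stopping at the first gap. Used
 * below to verify that a non-IOMMU import is really linear.
 */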
static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
						     int count)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, count, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

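/*
 * Two import strategies: with an IOMMU, any sg table can be mapped to a
 * contiguous IOVA range; without one, the import is only accepted if the
 * DMA mapping turns out to be physically contiguous.
 */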
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
			       DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
			     DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

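/*
 * IOMMU-backed objects get a fresh write-combined vmap() on each call,
 * torn down again in rockchip_gem_prime_vunmap(). CMA-backed objects
 * reuse the kernel mapping made at allocation time, which does not
 * exist when DMA_ATTR_NO_KERNEL_MAPPING was set.
 */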
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}