// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

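/*
 * Reserve an IOVA range from the driver-managed drm_mm allocator and map
 * the object's scatter/gather list into the IOMMU domain at that address.
 * On success, rk_obj->dma_addr holds the IOVA and rk_obj->size the mapped
 * length.
 */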
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zu\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

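/* Undo rockchip_gem_iommu_map(): unmap the IOVA range and free the node. */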
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

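/*
 * Allocate backing pages from shmem, build a scatter/gather table for them
 * and flush the CPU caches so the device observes the buffer contents.
 */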
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

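/* Free the scatter/gather table and release the shmem-backed pages. */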
static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

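/*
 * IOMMU-backed allocation: discontiguous pages presented to the device as
 * one contiguous IOVA range, optionally vmap()ed for CPU access.
 */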
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

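/*
 * DMA-API-backed allocation: physically contiguous write-combined memory.
 * Without alloc_kmap, DMA_ATTR_NO_KERNEL_MAPPING is set and kvaddr is an
 * opaque cookie rather than a usable kernel address.
 */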
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n",
			  obj->size);
		return -ENOMEM;
	}

	return 0;
}

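/* Choose the allocation backend based on whether an IOMMU domain exists. */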
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

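/*
 * Map the individual backing pages into userspace; vm_map_pages() also
 * validates the requested VMA size against the page count.
 */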
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

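/* Common mmap path shared by the file_operations and PRIME entry points. */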
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * The buffer is backed by struct pages, so clear the VM_PFNMAP
	 * flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

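/* Map the whole object into a VMA, e.g. for the PRIME mmap callback. */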
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

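/* Allocate and initialise the GEM object itself; no backing storage yet. */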
static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
				     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct rockchip_gem_object * on success or an ERR_PTR()
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID from the IDR table where the object is registered;
	 * the returned handle is the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

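/*
 * Return the number of bytes that are contiguous in DMA address space at
 * the start of the mapped scatterlist.
 */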
static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
						     int count)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, count, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

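/*
 * Without an IOMMU, imported buffers must be contiguous once mapped for
 * DMA; reject attachments the DMA API cannot map as a single linear range.
 */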
static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
			       DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
			     DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

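/*
 * Import a dma-buf: wrap the attachment's sg_table in a new GEM object and
 * map it either through the IOMMU domain or via the DMA API.
 */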
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

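/*
 * Kernel mapping for PRIME: IOMMU-backed objects get a fresh write-combined
 * vmap(); DMA allocations reuse kvaddr unless no kernel mapping exists.
 */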
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

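/* Undo rockchip_gem_prime_vmap(); DMA allocations keep their mapping. */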
void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}