// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

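/*
 * Reserve an IOVA range for the buffer with the driver's DRM MM
 * allocator and map the buffer's sg_table into the shared IOMMU
 * domain. On success, rk_obj->dma_addr holds the start of the mapping
 * and rk_obj->size its mapped length.
 */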
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zu\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

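/*
 * Undo rockchip_gem_iommu_map(): unmap the buffer from the IOMMU
 * domain and return its IOVA range to the DRM MM allocator.
 */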
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

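/*
 * Back the object with shmem pages, build an sg_table covering them
 * and flush the CPU caches so the new pages are clean before any
 * device access.
 */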
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

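/*
 * Drop the sg_table and release the shmem pages, marking them dirty
 * and accessed so their contents survive reclaim.
 */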
static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

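/*
 * IOMMU-backed allocation: get shmem pages, map them into the IOMMU
 * domain and, if requested, vmap() them to get a kernel virtual
 * address as well.
 */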
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

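/*
 * Contiguous DMA allocation, used when no IOMMU domain is available.
 * The kernel mapping is skipped unless the caller asks for one.
 */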
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n", obj->size);
		return -ENOMEM;
	}

	return 0;
}

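/*
 * Choose the allocation backend: IOMMU-backed shmem pages when a
 * domain exists, physically contiguous DMA memory otherwise.
 */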
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

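/*
 * Map the shmem pages of an IOMMU-backed object into userspace.
 * vm_map_pages() honours vma->vm_pgoff, which is why the fake DRM
 * offset is cleared before this is called.
 */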
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

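/*
 * Common mmap tail shared by rockchip_gem_mmap_buf() and
 * rockchip_gem_mmap(): clear VM_PFNMAP and dispatch to the IOMMU or
 * DMA backend.
 */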
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

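/*
 * Map a GEM object the caller already holds into userspace, without
 * going through the DRM mmap offset lookup.
 */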
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

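/*
 * Allocate and initialize the wrapper object only; the backing
 * storage is allocated separately by rockchip_gem_alloc_buf().
 */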
static struct rockchip_gem_object *
	rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

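/*
 * rockchip_gem_create_object - allocate a GEM object and its backing
 * storage
 *
 * Returns a struct rockchip_gem_object pointer on success, or an
 * ERR_PTR() value on failure. No handle is created; see
 * rockchip_gem_create_with_handle().
 */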
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
				     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a GEM handle for it
 *
 * Returns a struct rockchip_gem_object pointer on success, or an
 * ERR_PTR() value on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle returned to userspace is that ID.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required.
 * Wrap this in your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align to 64 bytes, since the Mali GPU requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

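/*
 * Return the length of the initial run of sg entries whose DMA
 * addresses are contiguous. Used below to check that dma_map_sg()
 * produced a single device-contiguous range.
 */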
static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
						     int count)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, count, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

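/*
 * PRIME import helpers: with an IOMMU the imported sg_table is mapped
 * straight into the domain; without one, dma_map_sg() must yield a
 * single contiguous DMA range covering the whole dma-buf.
 */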
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
			       DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
			     DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

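/*
 * rockchip_gem_prime_import_sg_table - (struct drm_driver)->gem_prime_import_sg_table
 * callback function
 */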
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

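/*
 * Return a kernel virtual address for the buffer: vmap() the shmem
 * pages when IOMMU-backed, otherwise reuse the kernel mapping made at
 * allocation time, if any.
 */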
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}