xref: /openbmc/linux/drivers/gpu/drm/tegra/gem.c (revision d236d361)
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_unreference_unlocked(&obj->gem);
}

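/*
 * Pinning a buffer for host1x job submission simply hands back the address
 * (an IOVA when an IOMMU domain is in use, a physical address otherwise)
 * that was resolved at allocation or import time. No per-pin work is
 * required, which is also why tegra_bo_unpin() below is empty.
 */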
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	*sgt = obj->sgt;

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

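/*
 * Map the whole buffer into the kernel's address space. Three cases are
 * handled: contiguous DMA allocations already have a kernel mapping
 * (vaddr), imported dma-bufs are mapped through their exporter, and
 * IOMMU-backed page arrays are mapped with vmap() using write-combined
 * attributes to match the userspace mapping.
 */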
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr + page * PAGE_SIZE;
	else if (obj->gem.import_attach)
		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
	else
		return vmap(obj->pages + page, 1, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_reference(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

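/*
 * Allocate a contiguous range of I/O virtual address space from the
 * drm_mm allocator and map the buffer's scatter-gather list into it.
 * On success, bo->paddr holds the IOVA (not a physical address) and
 * bo->size the number of bytes actually mapped by iommu_map_sg().
 */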
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto unlock;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

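/*
 * Common object setup shared by allocation and import: initialize the
 * host1x_bo wrapper, round the size up to a page multiple, and register
 * the object with GEM, including the fake offset later used for mmap().
 */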
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
	}
}

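/*
 * Back the object with shmem pages (for use behind the IOMMU) and build
 * a scatter-gather table describing them so that they can later be
 * mapped into the IOMMU domain and flushed from the CPU caches.
 */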
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this with drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}

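/*
 * Pick the backing store for a new buffer: with an IOMMU domain, use
 * discontiguous shmem pages remapped to a contiguous IOVA range; without
 * one, fall back to a physically contiguous write-combined DMA
 * allocation.
 */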
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

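/*
 * Create a buffer object of at least @size bytes. @flags may request a
 * tiled layout and/or bottom-up scanout (see the DRM_TEGRA_GEM_CREATE_*
 * flags in the tegra_drm UAPI). A caller allocating a plain linear
 * buffer would do something like this (illustrative only):
 *
 *	bo = tegra_bo_create(drm, size, 0);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */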
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

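/*
 * Like tegra_bo_create(), but additionally creates a userspace handle
 * for the object. The handle owns the only long-term reference; the
 * local reference taken at creation is dropped before returning, so the
 * returned pointer is only valid as long as the handle stays open.
 */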
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}

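/*
 * Wrap a foreign dma-buf in a tegra_bo. The buffer is attached and
 * mapped for device access; with an IOMMU the scatterlist is remapped
 * to a contiguous IOVA, otherwise the import is rejected unless it is
 * already physically contiguous (a single-entry scatterlist).
 */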
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

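/*
 * GEM free callback: tear down the IOMMU mapping (if any), then release
 * the backing store, either by unmapping and dropping the imported
 * dma-buf or by freeing the locally allocated pages or DMA memory.
 */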
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

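/*
 * Page-fault handler for mmap()ed IOMMU-backed buffers: insert the shmem
 * page backing the faulting address into the VMA. Buffers without a page
 * array (contiguous DMA allocations) are mapped upfront in
 * tegra_drm_mmap() and should never fault; they get SIGBUS as a safety
 * net.
 */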
static int tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, vmf->address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

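/*
 * mmap() entry point. Contiguous DMA buffers are mapped in one go with
 * dma_mmap_wc(), temporarily clearing vm_pgoff because the fake GEM mmap
 * offset would otherwise be interpreted as an offset into the
 * allocation. IOMMU-backed buffers are instead faulted in page by page
 * through tegra_bo_fault(), using a write-combined mapping.
 */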
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				  gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

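/*
 * dma-buf exporter side: build a scatter-gather table for the importing
 * device. Page-backed buffers get one entry per page and are mapped
 * through the importer's DMA API; contiguous buffers are described by a
 * single entry pointing at the existing DMA address.
 */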
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.map_atomic = tegra_gem_prime_kmap_atomic,
	.unmap_atomic = tegra_gem_prime_kunmap_atomic,
	.map = tegra_gem_prime_kmap,
	.unmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(drm, &exp_info);
}

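/*
 * PRIME import: if the dma-buf was exported by this driver instance,
 * short-circuit the import by taking a reference on the underlying GEM
 * object; otherwise create a new tegra_bo wrapping the foreign buffer.
 */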
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}
673