/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

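/*
 * Downcast from the host1x_bo embedded in a tegra_bo back to the
 * containing object. Only valid for host1x_bo pointers that were
 * initialized via host1x_bo_init() on a tegra_bo.
 */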
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

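/*
 * host1x reference counting is forwarded to the underlying GEM object.
 * In this kernel version drm_gem_object_unreference() and
 * drm_gem_object_reference() must be called with struct_mutex held,
 * hence the explicit locking here and in tegra_bo_get() below.
 */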
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}

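/*
 * Buffers stay mapped for their entire lifetime (either through the
 * contiguous DMA allocation or through the IOMMU), so pinning is a
 * no-op: simply hand back the device address. ->paddr is the IOVA when
 * an IOMMU domain is in use and a physical/DMA address otherwise.
 */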
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

/*
 * A generic iommu_map_sg() function is being reviewed and will hopefully be
 * merged soon. At that point this function can be dropped in favour of the
 * one provided by the IOMMU API.
 */
static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents,
			      int prot)
{
	struct scatterlist *s;
	size_t offset = 0;
	unsigned int i;
	int err;

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		size_t length = s->offset + s->length;

		err = iommu_map(domain, iova + offset, phys, length, prot);
		if (err < 0) {
			iommu_unmap(domain, iova, offset);
			return err;
		}

		offset += length;
	}

	return offset;
}

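/*
 * Allocate a node in the IOVA space managed by the drm_mm allocator and
 * map the buffer's scatter/gather list into it. On success, ->paddr
 * holds the base IOVA and ->size the number of bytes mapped.
 */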
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			     bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}

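/*
 * Common object setup: allocate the tegra_bo wrapper, initialize the
 * embedded host1x and GEM objects and create the fake mmap offset that
 * userspace passes to mmap(). Backing storage is allocated separately
 * by the callers.
 */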
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
				      bo->paddr);
	}
}

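/*
 * Look up and reference the shmem pages backing the GEM object and
 * build a scatter/gather table for them. This page-based backing store
 * is only used when an IOMMU domain is available, since the pages need
 * not be physically contiguous in that case.
 */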
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
			      size_t size)
{
	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		drm_gem_put_pages(&bo->gem, bo->pages, false, false);
		return PTR_ERR(bo->sgt);
	}

	return 0;
}

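/*
 * Allocate backing storage: discontiguous shmem pages mapped through
 * the IOMMU when a domain is available, or a physically contiguous
 * write-combined DMA allocation otherwise.
 */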
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
			  size_t size)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo, size);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
						   GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo, size);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

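/*
 * Like tegra_bo_create(), but additionally creates a userspace handle
 * for the object. The handle ends up holding the only reference, since
 * the initial creation reference is dropped before returning, so the
 * returned pointer is only valid as long as the handle exists.
 */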
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}

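/*
 * Import a buffer shared by another driver through dma-buf: attach to
 * the buffer, map it into the device's address space and wrap it in a
 * tegra_bo. Without an IOMMU the buffer must be physically contiguous
 * (a single scatterlist entry), since its DMA address is used directly
 * as ->paddr.
 */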
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

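/*
 * Create a "dumb" buffer suitable for scanout. The pitch is the byte
 * width of one scanline rounded up to the hardware alignment: for
 * example, 1920 pixels at 32 bpp need at least 1920 * 32 / 8 = 7680
 * bytes per line, so args->size for a 1080-line buffer would be
 * 7680 * 1080 bytes (7680 already satisfies typical alignments such as
 * 64 bytes).
 */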
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}

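/*
 * Fault handler for mmap'ed page-based buffers: look up the page that
 * backs the faulting address and insert it into the process' page
 * tables. Contiguous buffers are mapped up front in tegra_drm_mmap()
 * and should never fault here.
 */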
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

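/*
 * Two mapping strategies: contiguous buffers are remapped in one go via
 * dma_mmap_writecombine(), which requires vm_pgoff to be temporarily
 * cleared (the fake mmap offset was already consumed by drm_gem_mmap()
 * to look up the object) and restored afterwards; page-based buffers
 * are faulted in lazily through tegra_bo_fault().
 */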
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

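/*
 * dma-buf export: build a scatter/gather table for the importing
 * device. Page-based buffers get one entry per page and are mapped
 * through the DMA API; contiguous buffers are described by a single
 * entry carrying the DMA address and size directly.
 */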
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

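/*
 * CPU access to exported buffers through kmap or mmap is not supported,
 * so the callbacks below are stubs that fail accordingly. vmap does
 * work for contiguous buffers, which already have a kernel virtual
 * address; page-based buffers have no vaddr, so vmap returns NULL for
 * them.
 */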
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

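/*
 * PRIME entry points. Export wraps the GEM object in a dma_buf using
 * the ops above; import takes a shortcut for buffers that were exported
 * by this same device (just grab a reference instead of re-attaching)
 * and falls back to tegra_bo_import() for foreign buffers.
 */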
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
			      flags, NULL);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}