xref: /openbmc/linux/drivers/gpu/drm/tegra/gem.c (revision 9aab6601)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

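/*
 * Allocate a new SG table and copy the page/length layout of an existing
 * scatterlist into it entry by entry (offsets are reset to zero). This
 * yields a private copy that can be mapped without disturbing the DMA
 * state of the original table.
 */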
/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}

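/*
 * Pin a buffer object for use by host1x. Depending on how the buffer was
 * created, this either returns the IOVA of an existing mapping via @phys,
 * or hands back a freshly allocated SG table (built from the object's
 * pages, copied from an imported SG table, or obtained from the DMA API)
 * for host1x to map itself.
 */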
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 *
	 * Similarly, for buffers that have been allocated by the DMA API the
	 * physical address can be used for devices that are not attached to
	 * an IOMMU. For these devices, callers must pass a valid pointer via
	 * the @phys argument.
	 *
	 * Imported buffers were also already mapped at import time, so the
	 * existing mapping can be reused.
	 */
	if (phys) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
					     GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

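/*
 * Return a kernel virtual address for the buffer: the existing vaddr for
 * DMA API allocations, a dma-buf vmap()ping for imported buffers, or a
 * fresh write-combined vmap() of the object's pages otherwise.
 */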
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct dma_buf_map map;
	int ret;

	if (obj->vaddr) {
		return obj->vaddr;
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

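/*
 * Reserve a range of I/O virtual addresses from the Tegra DRM address
 * space and map the buffer's SG table into it through the IOMMU, making
 * the buffer accessible to the hardware at bo->iova.
 */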
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

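/*
 * Allocate and initialize the bookkeeping for a buffer object: the GEM
 * object core, the host1x_bo interface and the fake mmap offset. No
 * backing storage is allocated here.
 */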
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

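/*
 * Back the object with shmem pages, build an SG table for them and map
 * the table for DMA. The resulting SG table is subsequently mapped
 * through the IOMMU by tegra_bo_iommu_map().
 */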
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

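/*
 * Allocate backing storage for a buffer object. With an IOMMU domain
 * available, the buffer is assembled from individual pages and mapped
 * through the IOMMU; otherwise a physically contiguous, write-combined
 * DMA allocation is used.
 */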
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

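/*
 * Create a buffer object of the given size, allocate backing storage for
 * it and apply the DRM_TEGRA_GEM_CREATE_* flags (tiling mode and
 * bottom-up layout).
 */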
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

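/*
 * Create a buffer object and a userspace handle for it in one go. The
 * handle holds the reference returned to userspace; the local reference
 * taken during creation is dropped before returning.
 */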
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

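/*
 * Wrap a foreign dma-buf in a buffer object: attach to the dma-buf, map
 * it into the importer's address space and, if an IOMMU domain is in use,
 * create an IOMMU mapping for the imported SG table.
 */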
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

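/*
 * Tear down a buffer object once its last reference is dropped: undo any
 * IOMMU mapping, then either release the dma-buf attachment (imported
 * buffers) or free the backing storage (native buffers).
 */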
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

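/*
 * Implement DRM_IOCTL_MODE_CREATE_DUMB: compute a hardware-aligned pitch
 * and buffer size from the requested geometry, then create a buffer
 * object with a userspace handle for it.
 */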
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

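/*
 * Page fault handler for userspace mappings of page-backed buffers:
 * resolve the faulting address to the corresponding backing page and
 * insert it into the VMA. Contiguous DMA buffers are mapped upfront in
 * __tegra_gem_mmap() and never fault here.
 */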
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

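/*
 * Set up a userspace mapping for a buffer object. Contiguous DMA buffers
 * are mapped in full via dma_mmap_wc(); page-backed buffers are marked
 * VM_MIXEDMAP and write-combined so that tegra_bo_fault() can populate
 * them on demand.
 */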
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

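/*
 * dma-buf exporter hook: build an SG table describing the buffer (from
 * its pages, or via the DMA API for contiguous allocations) and map it
 * for DMA on behalf of the importing device.
 */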
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	dma_buf_map_set_vaddr(map, bo->vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

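/*
 * Export a buffer object as a dma-buf, using the driver's own dma-buf
 * ops so that map, mmap(), vmap() and CPU access are serviced by the
 * helpers above.
 */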
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

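/*
 * Import a dma-buf as a buffer object. If the dma-buf was exported by
 * this driver on the same device, the underlying GEM object is simply
 * referenced again instead of creating a new import.
 */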
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}