xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision 8eaf9b02)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <linux/dma-map-ops.h>
8 #include <linux/spinlock.h>
9 #include <linux/shmem_fs.h>
10 #include <linux/dma-buf.h>
11 #include <linux/pfn_t.h>
12 
13 #include <drm/drm_prime.h>
14 
15 #include "msm_drv.h"
16 #include "msm_fence.h"
17 #include "msm_gem.h"
18 #include "msm_gpu.h"
19 #include "msm_mmu.h"
20 
21 static void update_inactive(struct msm_gem_object *msm_obj);
22 
23 static dma_addr_t physaddr(struct drm_gem_object *obj)
24 {
25 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
26 	struct msm_drm_private *priv = obj->dev->dev_private;
27 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
28 			priv->vram.paddr;
29 }
30 
31 static bool use_pages(struct drm_gem_object *obj)
32 {
33 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
34 	return !msm_obj->vram_node;
35 }
36 
37 /*
38  * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
39  * API.  Really GPU cache is out of scope here (handled on cmdstream)
40  * and all we need to do is invalidate newly allocated pages before
41  * mapping to CPU as uncached/writecombine.
42  *
43  * On top of this, we have the added headache that, depending on
44  * display generation, the display's iommu may be wired up to either
45  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
46  * that here we either have dma-direct or iommu ops.
47  *
48  * Let this be a cautionary tale of abstraction gone wrong.
49  */
50 
51 static void sync_for_device(struct msm_gem_object *msm_obj)
52 {
53 	struct device *dev = msm_obj->base.dev->dev;
54 
55 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
56 }
57 
58 static void sync_for_cpu(struct msm_gem_object *msm_obj)
59 {
60 	struct device *dev = msm_obj->base.dev->dev;
61 
62 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
63 }
64 
65 /* allocate pages from VRAM carveout, used when no IOMMU: */
66 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
67 {
68 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
69 	struct msm_drm_private *priv = obj->dev->dev_private;
70 	dma_addr_t paddr;
71 	struct page **p;
72 	int ret, i;
73 
74 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
75 	if (!p)
76 		return ERR_PTR(-ENOMEM);
77 
78 	spin_lock(&priv->vram.lock);
79 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
80 	spin_unlock(&priv->vram.lock);
81 	if (ret) {
82 		kvfree(p);
83 		return ERR_PTR(ret);
84 	}
85 
86 	paddr = physaddr(obj);
87 	for (i = 0; i < npages; i++) {
88 		p[i] = phys_to_page(paddr);
89 		paddr += PAGE_SIZE;
90 	}
91 
92 	return p;
93 }
94 
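/*
 * Get (and lazily allocate) the backing pages for an object: shmem pages
 * in the normal case, or a slice of the VRAM carveout when there is no
 * IOMMU.  Also builds the sg_table and, for WC/uncached buffers, pushes
 * the freshly allocated pages out of the CPU cache.  Caller must hold the
 * object lock.
 */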
95 static struct page **get_pages(struct drm_gem_object *obj)
96 {
97 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
98 
99 	GEM_WARN_ON(!msm_gem_is_locked(obj));
100 
101 	if (!msm_obj->pages) {
102 		struct drm_device *dev = obj->dev;
103 		struct page **p;
104 		int npages = obj->size >> PAGE_SHIFT;
105 
106 		if (use_pages(obj))
107 			p = drm_gem_get_pages(obj);
108 		else
109 			p = get_pages_vram(obj, npages);
110 
111 		if (IS_ERR(p)) {
112 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
113 					PTR_ERR(p));
114 			return p;
115 		}
116 
117 		msm_obj->pages = p;
118 
119 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
120 		if (IS_ERR(msm_obj->sgt)) {
121 			void *ptr = ERR_CAST(msm_obj->sgt);
122 
123 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
124 			msm_obj->sgt = NULL;
125 			return ptr;
126 		}
127 
128 		/* For non-cached buffers, ensure the new pages are clean
129 		 * because display controller, GPU, etc. are not coherent:
130 		 */
131 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
132 			sync_for_device(msm_obj);
133 
134 		GEM_WARN_ON(msm_obj->active_count);
135 		update_inactive(msm_obj);
136 	}
137 
138 	return msm_obj->pages;
139 }
140 
141 static void put_pages_vram(struct drm_gem_object *obj)
142 {
143 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
144 	struct msm_drm_private *priv = obj->dev->dev_private;
145 
146 	spin_lock(&priv->vram.lock);
147 	drm_mm_remove_node(msm_obj->vram_node);
148 	spin_unlock(&priv->vram.lock);
149 
150 	kvfree(msm_obj->pages);
151 }
152 
153 static void put_pages(struct drm_gem_object *obj)
154 {
155 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
156 
157 	if (msm_obj->pages) {
158 		if (msm_obj->sgt) {
159 			/* For non-cached buffers, undo the device mapping (and
160 			 * any cache maintenance) that was set up when the
161 			 * pages were first allocated in get_pages():
162 			 */
163 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
164 				sync_for_cpu(msm_obj);
165 
166 			sg_free_table(msm_obj->sgt);
167 			kfree(msm_obj->sgt);
168 			msm_obj->sgt = NULL;
169 		}
170 
171 		if (use_pages(obj))
172 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
173 		else
174 			put_pages_vram(obj);
175 
176 		msm_obj->pages = NULL;
177 	}
178 }
179 
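/*
 * Pin the backing pages for CPU/driver use.  Each successful call bumps
 * pin_count and must be balanced by a msm_gem_put_pages().
 */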
180 struct page **msm_gem_get_pages(struct drm_gem_object *obj)
181 {
182 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
183 	struct page **p;
184 
185 	msm_gem_lock(obj);
186 
187 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
188 		msm_gem_unlock(obj);
189 		return ERR_PTR(-EBUSY);
190 	}
191 
192 	p = get_pages(obj);
193 
194 	if (!IS_ERR(p)) {
195 		msm_obj->pin_count++;
196 		update_inactive(msm_obj);
197 	}
198 
199 	msm_gem_unlock(obj);
200 	return p;
201 }
202 
203 void msm_gem_put_pages(struct drm_gem_object *obj)
204 {
205 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
206 
207 	msm_gem_lock(obj);
208 	msm_obj->pin_count--;
209 	GEM_WARN_ON(msm_obj->pin_count < 0);
210 	update_inactive(msm_obj);
211 	msm_gem_unlock(obj);
212 }
213 
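/*
 * Set up vma flags and page protection for a userspace mapping.  The
 * backing pages themselves are inserted lazily, one at a time, from
 * msm_gem_fault().
 */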
214 int msm_gem_mmap_obj(struct drm_gem_object *obj,
215 		struct vm_area_struct *vma)
216 {
217 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
218 
219 	vma->vm_flags &= ~VM_PFNMAP;
220 	vma->vm_flags |= VM_MIXEDMAP;
221 
222 	if (msm_obj->flags & MSM_BO_WC)
223 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
224 	else if (msm_obj->flags & MSM_BO_UNCACHED)
225 		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
226 	else
227 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
228 
229 	return 0;
230 }
231 
232 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
233 {
234 	int ret;
235 
236 	ret = drm_gem_mmap(filp, vma);
237 	if (ret) {
238 		DBG("mmap failed: %d", ret);
239 		return ret;
240 	}
241 
242 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
243 }
244 
245 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
246 {
247 	struct vm_area_struct *vma = vmf->vma;
248 	struct drm_gem_object *obj = vma->vm_private_data;
249 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
250 	struct page **pages;
251 	unsigned long pfn;
252 	pgoff_t pgoff;
253 	int err;
254 	vm_fault_t ret;
255 
256 	/*
257 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
258 	 * a reference on obj. So, we don't need to hold one here.
259 	 */
260 	err = msm_gem_lock_interruptible(obj);
261 	if (err) {
262 		ret = VM_FAULT_NOPAGE;
263 		goto out;
264 	}
265 
266 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
267 		msm_gem_unlock(obj);
268 		return VM_FAULT_SIGBUS;
269 	}
270 
271 	/* make sure we have pages attached now */
272 	pages = get_pages(obj);
273 	if (IS_ERR(pages)) {
274 		ret = vmf_error(PTR_ERR(pages));
275 		goto out_unlock;
276 	}
277 
278 	/* We don't use vmf->pgoff since that has the fake offset: */
279 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
280 
281 	pfn = page_to_pfn(pages[pgoff]);
282 
283 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
284 			pfn, pfn << PAGE_SHIFT);
285 
286 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
287 out_unlock:
288 	msm_gem_unlock(obj);
289 out:
290 	return ret;
291 }
292 
293 /* get mmap offset */
294 static uint64_t mmap_offset(struct drm_gem_object *obj)
295 {
296 	struct drm_device *dev = obj->dev;
297 	int ret;
298 
299 	GEM_WARN_ON(!msm_gem_is_locked(obj));
300 
301 	/* Make it mmapable */
302 	ret = drm_gem_create_mmap_offset(obj);
303 
304 	if (ret) {
305 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
306 		return 0;
307 	}
308 
309 	return drm_vma_node_offset_addr(&obj->vma_node);
310 }
311 
312 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
313 {
314 	uint64_t offset;
315 
316 	msm_gem_lock(obj);
317 	offset = mmap_offset(obj);
318 	msm_gem_unlock(obj);
319 	return offset;
320 }
321 
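/*
 * Each object keeps a list of per-address-space VMAs (msm_obj->vmas);
 * the helpers below manage that list and are called with the object lock
 * held.
 */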
322 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
323 		struct msm_gem_address_space *aspace)
324 {
325 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
326 	struct msm_gem_vma *vma;
327 
328 	GEM_WARN_ON(!msm_gem_is_locked(obj));
329 
330 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
331 	if (!vma)
332 		return ERR_PTR(-ENOMEM);
333 
334 	vma->aspace = aspace;
335 
336 	list_add_tail(&vma->list, &msm_obj->vmas);
337 
338 	return vma;
339 }
340 
341 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
342 		struct msm_gem_address_space *aspace)
343 {
344 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
345 	struct msm_gem_vma *vma;
346 
347 	GEM_WARN_ON(!msm_gem_is_locked(obj));
348 
349 	list_for_each_entry(vma, &msm_obj->vmas, list) {
350 		if (vma->aspace == aspace)
351 			return vma;
352 	}
353 
354 	return NULL;
355 }
356 
357 static void del_vma(struct msm_gem_vma *vma)
358 {
359 	if (!vma)
360 		return;
361 
362 	list_del(&vma->list);
363 	kfree(vma);
364 }
365 
366 /*
367  * If close is true, this also closes the VMA (releasing the allocated
368  * iova range) in addition to removing the iommu mapping.  In the eviction
369  * case (!close), we keep the iova allocated, but only remove the iommu
370  * mapping.
371  */
372 static void
373 put_iova_spaces(struct drm_gem_object *obj, bool close)
374 {
375 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
376 	struct msm_gem_vma *vma;
377 
378 	GEM_WARN_ON(!msm_gem_is_locked(obj));
379 
380 	list_for_each_entry(vma, &msm_obj->vmas, list) {
381 		if (vma->aspace) {
382 			msm_gem_purge_vma(vma->aspace, vma);
383 			if (close)
384 				msm_gem_close_vma(vma->aspace, vma);
385 		}
386 	}
387 }
388 
389 /* Called with msm_obj locked; frees the vma bookkeeping itself (iommu mappings are torn down separately via put_iova_spaces()) */
390 static void
391 put_iova_vmas(struct drm_gem_object *obj)
392 {
393 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
394 	struct msm_gem_vma *vma, *tmp;
395 
396 	GEM_WARN_ON(!msm_gem_is_locked(obj));
397 
398 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
399 		del_vma(vma);
400 	}
401 }
402 
403 static int get_iova_locked(struct drm_gem_object *obj,
404 		struct msm_gem_address_space *aspace, uint64_t *iova,
405 		u64 range_start, u64 range_end)
406 {
407 	struct msm_gem_vma *vma;
408 	int ret = 0;
409 
410 	GEM_WARN_ON(!msm_gem_is_locked(obj));
411 
412 	vma = lookup_vma(obj, aspace);
413 
414 	if (!vma) {
415 		vma = add_vma(obj, aspace);
416 		if (IS_ERR(vma))
417 			return PTR_ERR(vma);
418 
419 		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
420 			range_start, range_end);
421 		if (ret) {
422 			del_vma(vma);
423 			return ret;
424 		}
425 	}
426 
427 	*iova = vma->iova;
428 	return 0;
429 }
430 
431 static int msm_gem_pin_iova(struct drm_gem_object *obj,
432 		struct msm_gem_address_space *aspace)
433 {
434 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
435 	struct msm_gem_vma *vma;
436 	struct page **pages;
437 	int ret, prot = IOMMU_READ;
438 
439 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
440 		prot |= IOMMU_WRITE;
441 
442 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
443 		prot |= IOMMU_PRIV;
444 
445 	GEM_WARN_ON(!msm_gem_is_locked(obj));
446 
447 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
448 		return -EBUSY;
449 
450 	vma = lookup_vma(obj, aspace);
451 	if (GEM_WARN_ON(!vma))
452 		return -EINVAL;
453 
454 	pages = get_pages(obj);
455 	if (IS_ERR(pages))
456 		return PTR_ERR(pages);
457 
458 	ret = msm_gem_map_vma(aspace, vma, prot,
459 			msm_obj->sgt, obj->size >> PAGE_SHIFT);
460 
461 	if (!ret)
462 		msm_obj->pin_count++;
463 
464 	return ret;
465 }
466 
467 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
468 		struct msm_gem_address_space *aspace, uint64_t *iova,
469 		u64 range_start, u64 range_end)
470 {
471 	u64 local;
472 	int ret;
473 
474 	GEM_WARN_ON(!msm_gem_is_locked(obj));
475 
476 	ret = get_iova_locked(obj, aspace, &local,
477 		range_start, range_end);
478 
479 	if (!ret)
480 		ret = msm_gem_pin_iova(obj, aspace);
481 
482 	if (!ret)
483 		*iova = local;
484 
485 	return ret;
486 }
487 
488 /*
489  * Get the iova and pin it. Should have a matching put.
490  * Limits the iova to the specified range (in pages).
491  */
492 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
493 		struct msm_gem_address_space *aspace, uint64_t *iova,
494 		u64 range_start, u64 range_end)
495 {
496 	int ret;
497 
498 	msm_gem_lock(obj);
499 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
500 	msm_gem_unlock(obj);
501 
502 	return ret;
503 }
504 
505 int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
506 		struct msm_gem_address_space *aspace, uint64_t *iova)
507 {
508 	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
509 }
510 
511 /* get iova and pin it. Should have a matching put */
512 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
513 		struct msm_gem_address_space *aspace, uint64_t *iova)
514 {
515 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
516 }
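
/*
 * Illustrative only (not lifted from a real caller): a typical user pins
 * the buffer before handing its iova to the GPU and unpins it once the
 * mapping is no longer needed:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... emit cmdstream referencing iova ...
 *	msm_gem_unpin_iova(obj, aspace);
 */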
517 
518 /*
519  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
520  * valid for the life of the object
521  */
522 int msm_gem_get_iova(struct drm_gem_object *obj,
523 		struct msm_gem_address_space *aspace, uint64_t *iova)
524 {
525 	int ret;
526 
527 	msm_gem_lock(obj);
528 	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
529 	msm_gem_unlock(obj);
530 
531 	return ret;
532 }
533 
534 /* get iova without taking a reference, used in places where you have
535  * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
536  */
537 uint64_t msm_gem_iova(struct drm_gem_object *obj,
538 		struct msm_gem_address_space *aspace)
539 {
540 	struct msm_gem_vma *vma;
541 
542 	msm_gem_lock(obj);
543 	vma = lookup_vma(obj, aspace);
544 	msm_gem_unlock(obj);
545 	GEM_WARN_ON(!vma);
546 
547 	return vma ? vma->iova : 0;
548 }
549 
550 /*
551  * Locked variant of msm_gem_unpin_iova()
552  */
553 void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
554 		struct msm_gem_address_space *aspace)
555 {
556 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
557 	struct msm_gem_vma *vma;
558 
559 	GEM_WARN_ON(!msm_gem_is_locked(obj));
560 
561 	vma = lookup_vma(obj, aspace);
562 
563 	if (!GEM_WARN_ON(!vma)) {
564 		msm_gem_unmap_vma(aspace, vma);
565 
566 		msm_obj->pin_count--;
567 		GEM_WARN_ON(msm_obj->pin_count < 0);
568 
569 		update_inactive(msm_obj);
570 	}
571 }
572 
573 /*
574  * Unpin an iova by updating the reference counts. The memory isn't actually
575  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
576  * to get rid of it
577  */
578 void msm_gem_unpin_iova(struct drm_gem_object *obj,
579 		struct msm_gem_address_space *aspace)
580 {
581 	msm_gem_lock(obj);
582 	msm_gem_unpin_iova_locked(obj, aspace);
583 	msm_gem_unlock(obj);
584 }
585 
586 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
587 		struct drm_mode_create_dumb *args)
588 {
589 	args->pitch = align_pitch(args->width, args->bpp);
590 	args->size  = PAGE_ALIGN(args->pitch * args->height);
591 	return msm_gem_new_handle(dev, file, args->size,
592 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
593 }
594 
595 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
596 		uint32_t handle, uint64_t *offset)
597 {
598 	struct drm_gem_object *obj;
599 	int ret = 0;
600 
601 	/* GEM does all our handle to object mapping */
602 	obj = drm_gem_object_lookup(file, handle);
603 	if (obj == NULL) {
604 		ret = -ENOENT;
605 		goto fail;
606 	}
607 
608 	*offset = msm_gem_mmap_offset(obj);
609 
610 	drm_gem_object_put(obj);
611 
612 fail:
613 	return ret;
614 }
615 
616 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
617 {
618 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
619 	int ret = 0;
620 
621 	GEM_WARN_ON(!msm_gem_is_locked(obj));
622 
623 	if (obj->import_attach)
624 		return ERR_PTR(-ENODEV);
625 
626 	if (GEM_WARN_ON(msm_obj->madv > madv)) {
627 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
628 			msm_obj->madv, madv);
629 		return ERR_PTR(-EBUSY);
630 	}
631 
632 	/* increment vmap_count *before* vmap() call, so shrinker can
633 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
634 	 * This guarantees that we won't try to msm_gem_vunmap() this
635 	 * same object from within the vmap() call (while we already
636 	 * hold msm_obj lock)
637 	 */
638 	msm_obj->vmap_count++;
639 
640 	if (!msm_obj->vaddr) {
641 		struct page **pages = get_pages(obj);
642 		if (IS_ERR(pages)) {
643 			ret = PTR_ERR(pages);
644 			goto fail;
645 		}
646 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
647 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
648 		if (msm_obj->vaddr == NULL) {
649 			ret = -ENOMEM;
650 			goto fail;
651 		}
652 
653 		update_inactive(msm_obj);
654 	}
655 
656 	return msm_obj->vaddr;
657 
658 fail:
659 	msm_obj->vmap_count--;
660 	return ERR_PTR(ret);
661 }
662 
663 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
664 {
665 	return get_vaddr(obj, MSM_MADV_WILLNEED);
666 }
667 
668 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
669 {
670 	void *ret;
671 
672 	msm_gem_lock(obj);
673 	ret = msm_gem_get_vaddr_locked(obj);
674 	msm_gem_unlock(obj);
675 
676 	return ret;
677 }
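
/*
 * Illustrative only (hypothetical caller): CPU access to an object's
 * contents brackets the kernel mapping with get/put, so the shrinker can
 * vunmap objects that are no longer in use:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */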
678 
679 /*
680  * Don't use this!  It is for the very special case of dumping
681  * submits from GPU hangs or faults, where the bo may already
682  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
683  * active list.
684  */
685 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
686 {
687 	return get_vaddr(obj, __MSM_MADV_PURGED);
688 }
689 
690 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
691 {
692 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
693 
694 	GEM_WARN_ON(!msm_gem_is_locked(obj));
695 	GEM_WARN_ON(msm_obj->vmap_count < 1);
696 
697 	msm_obj->vmap_count--;
698 }
699 
700 void msm_gem_put_vaddr(struct drm_gem_object *obj)
701 {
702 	msm_gem_lock(obj);
703 	msm_gem_put_vaddr_locked(obj);
704 	msm_gem_unlock(obj);
705 }
706 
707 /* Update madvise status, returns true if not purged, else
708  * false.
709  */
710 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
711 {
712 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
713 
714 	msm_gem_lock(obj);
715 
716 	if (msm_obj->madv != __MSM_MADV_PURGED)
717 		msm_obj->madv = madv;
718 
719 	madv = msm_obj->madv;
720 
721 	/* If the obj is inactive, we might need to move it
722 	 * between inactive lists
723 	 */
724 	if (msm_obj->active_count == 0)
725 		update_inactive(msm_obj);
726 
727 	msm_gem_unlock(obj);
728 
729 	return (madv != __MSM_MADV_PURGED);
730 }
731 
732 void msm_gem_purge(struct drm_gem_object *obj)
733 {
734 	struct drm_device *dev = obj->dev;
735 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
736 
737 	GEM_WARN_ON(!msm_gem_is_locked(obj));
738 	GEM_WARN_ON(!is_purgeable(msm_obj));
739 
740 	/* Get rid of any iommu mapping(s): */
741 	put_iova_spaces(obj, true);
742 
743 	msm_gem_vunmap(obj);
744 
745 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
746 
747 	put_pages(obj);
748 
749 	put_iova_vmas(obj);
750 
751 	msm_obj->madv = __MSM_MADV_PURGED;
752 	update_inactive(msm_obj);
753 
754 	drm_gem_free_mmap_offset(obj);
755 
756 	/* Our goal here is to return as much of the memory as
757 	 * possible back to the system, as we are called from OOM.
758 	 * To do this we must instruct the shmfs to drop all of its
759 	 * backing pages, *now*.
760 	 */
761 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
762 
763 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
764 			0, (loff_t)-1);
765 }
766 
767 /*
768  * Unpin the backing pages and make them available to be swapped out.
769  */
770 void msm_gem_evict(struct drm_gem_object *obj)
771 {
772 	struct drm_device *dev = obj->dev;
773 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
774 
775 	GEM_WARN_ON(!msm_gem_is_locked(obj));
776 	GEM_WARN_ON(is_unevictable(msm_obj));
777 	GEM_WARN_ON(!msm_obj->evictable);
778 	GEM_WARN_ON(msm_obj->active_count);
779 
780 	/* Get rid of any iommu mapping(s): */
781 	put_iova_spaces(obj, false);
782 
783 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
784 
785 	put_pages(obj);
786 
787 	update_inactive(msm_obj);
788 }
789 
790 void msm_gem_vunmap(struct drm_gem_object *obj)
791 {
792 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
793 
794 	GEM_WARN_ON(!msm_gem_is_locked(obj));
795 
796 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
797 		return;
798 
799 	vunmap(msm_obj->vaddr);
800 	msm_obj->vaddr = NULL;
801 }
802 
803 /* must be called before msm_gem_active_get().. */
804 int msm_gem_sync_object(struct drm_gem_object *obj,
805 		struct msm_fence_context *fctx, bool exclusive)
806 {
807 	struct dma_resv_list *fobj;
808 	struct dma_fence *fence;
809 	int i, ret;
810 
811 	fobj = dma_resv_get_list(obj->resv);
812 	if (!fobj || (fobj->shared_count == 0)) {
813 		fence = dma_resv_get_excl(obj->resv);
814 		/* don't need to wait on our own fences, since ring is fifo */
815 		if (fence && (fence->context != fctx->context)) {
816 			ret = dma_fence_wait(fence, true);
817 			if (ret)
818 				return ret;
819 		}
820 	}
821 
822 	if (!exclusive || !fobj)
823 		return 0;
824 
825 	for (i = 0; i < fobj->shared_count; i++) {
826 		fence = rcu_dereference_protected(fobj->shared[i],
827 						dma_resv_held(obj->resv));
828 		if (fence->context != fctx->context) {
829 			ret = dma_fence_wait(fence, true);
830 			if (ret)
831 				return ret;
832 		}
833 	}
834 
835 	return 0;
836 }
837 
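/*
 * Active tracking: while an object has in-flight GPU work, active_count
 * is non-zero and the object sits on the gpu's active_list; once the
 * count drops back to zero it is moved to the appropriate inactive list
 * by update_inactive().
 */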
838 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
839 {
840 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
841 	struct msm_drm_private *priv = obj->dev->dev_private;
842 
843 	might_sleep();
844 	GEM_WARN_ON(!msm_gem_is_locked(obj));
845 	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
846 	GEM_WARN_ON(msm_obj->dontneed);
847 	GEM_WARN_ON(!msm_obj->sgt);
848 
849 	if (msm_obj->active_count++ == 0) {
850 		mutex_lock(&priv->mm_lock);
851 		if (msm_obj->evictable)
852 			mark_unevictable(msm_obj);
853 		list_del(&msm_obj->mm_list);
854 		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
855 		mutex_unlock(&priv->mm_lock);
856 	}
857 }
858 
859 void msm_gem_active_put(struct drm_gem_object *obj)
860 {
861 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
862 
863 	might_sleep();
864 	GEM_WARN_ON(!msm_gem_is_locked(obj));
865 
866 	if (--msm_obj->active_count == 0) {
867 		update_inactive(msm_obj);
868 	}
869 }
870 
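/*
 * Move an inactive object to the inactive list matching its current
 * state: willneed (evictable), dontneed (purgeable), or unpinned/purged.
 * Called with the object lock held; the lists themselves are protected
 * by priv->mm_lock.
 */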
871 static void update_inactive(struct msm_gem_object *msm_obj)
872 {
873 	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
874 
875 	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
876 
877 	if (msm_obj->active_count != 0)
878 		return;
879 
880 	mutex_lock(&priv->mm_lock);
881 
882 	if (msm_obj->dontneed)
883 		mark_unpurgeable(msm_obj);
884 	if (msm_obj->evictable)
885 		mark_unevictable(msm_obj);
886 
887 	list_del(&msm_obj->mm_list);
888 	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
889 		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
890 		mark_evictable(msm_obj);
891 	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
892 		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
893 		mark_purgeable(msm_obj);
894 	} else {
895 		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
896 		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
897 	}
898 
899 	mutex_unlock(&priv->mm_lock);
900 }
901 
902 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
903 {
904 	bool write = !!(op & MSM_PREP_WRITE);
905 	unsigned long remain =
906 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
907 	long ret;
908 
909 	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
910 						  true,  remain);
911 	if (ret == 0)
912 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
913 	else if (ret < 0)
914 		return ret;
915 
916 	/* TODO cache maintenance */
917 
918 	return 0;
919 }
920 
921 int msm_gem_cpu_fini(struct drm_gem_object *obj)
922 {
923 	/* TODO cache maintenance */
924 	return 0;
925 }
926 
927 #ifdef CONFIG_DEBUG_FS
928 static void describe_fence(struct dma_fence *fence, const char *type,
929 		struct seq_file *m)
930 {
931 	if (!dma_fence_is_signaled(fence))
932 		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
933 				fence->ops->get_driver_name(fence),
934 				fence->ops->get_timeline_name(fence),
935 				fence->seqno);
936 }
937 
938 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
939 		struct msm_gem_stats *stats)
940 {
941 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
942 	struct dma_resv *robj = obj->resv;
943 	struct dma_resv_list *fobj;
944 	struct dma_fence *fence;
945 	struct msm_gem_vma *vma;
946 	uint64_t off = drm_vma_node_start(&obj->vma_node);
947 	const char *madv;
948 
949 	msm_gem_lock(obj);
950 
951 	stats->all.count++;
952 	stats->all.size += obj->size;
953 
954 	if (is_active(msm_obj)) {
955 		stats->active.count++;
956 		stats->active.size += obj->size;
957 	}
958 
959 	if (msm_obj->pages) {
960 		stats->resident.count++;
961 		stats->resident.size += obj->size;
962 	}
963 
964 	switch (msm_obj->madv) {
965 	case __MSM_MADV_PURGED:
966 		stats->purged.count++;
967 		stats->purged.size += obj->size;
968 		madv = " purged";
969 		break;
970 	case MSM_MADV_DONTNEED:
971 		stats->purgeable.count++;
972 		stats->purgeable.size += obj->size;
973 		madv = " purgeable";
974 		break;
975 	case MSM_MADV_WILLNEED:
976 	default:
977 		madv = "";
978 		break;
979 	}
980 
981 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
982 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
983 			obj->name, kref_read(&obj->refcount),
984 			off, msm_obj->vaddr);
985 
986 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
987 
988 	if (!list_empty(&msm_obj->vmas)) {
989 
990 		seq_puts(m, "      vmas:");
991 
992 		list_for_each_entry(vma, &msm_obj->vmas, list) {
993 			const char *name, *comm;
994 			if (vma->aspace) {
995 				struct msm_gem_address_space *aspace = vma->aspace;
996 				struct task_struct *task =
997 					get_pid_task(aspace->pid, PIDTYPE_PID);
998 				if (task) {
999 					comm = kstrdup(task->comm, GFP_KERNEL);
1000 				} else {
1001 					comm = NULL;
1002 				}
1003 				name = aspace->name;
1004 			} else {
1005 				name = comm = NULL;
1006 			}
1007 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
1008 				name, comm ? ":" : "", comm ? comm : "",
1009 				vma->aspace, vma->iova,
1010 				vma->mapped ? "mapped" : "unmapped",
1011 				vma->inuse);
1012 			kfree(comm);
1013 		}
1014 
1015 		seq_puts(m, "\n");
1016 	}
1017 
1018 	rcu_read_lock();
1019 	fobj = rcu_dereference(robj->fence);
1020 	if (fobj) {
1021 		unsigned int i, shared_count = fobj->shared_count;
1022 
1023 		for (i = 0; i < shared_count; i++) {
1024 			fence = rcu_dereference(fobj->shared[i]);
1025 			describe_fence(fence, "Shared", m);
1026 		}
1027 	}
1028 
1029 	fence = rcu_dereference(robj->fence_excl);
1030 	if (fence)
1031 		describe_fence(fence, "Exclusive", m);
1032 	rcu_read_unlock();
1033 
1034 	msm_gem_unlock(obj);
1035 }
1036 
1037 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1038 {
1039 	struct msm_gem_stats stats = {};
1040 	struct msm_gem_object *msm_obj;
1041 
1042 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
1043 	list_for_each_entry(msm_obj, list, node) {
1044 		struct drm_gem_object *obj = &msm_obj->base;
1045 		seq_puts(m, "   ");
1046 		msm_gem_describe(obj, m, &stats);
1047 	}
1048 
1049 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1050 			stats.all.count, stats.all.size);
1051 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1052 			stats.active.count, stats.active.size);
1053 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1054 			stats.resident.count, stats.resident.size);
1055 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1056 			stats.purgeable.count, stats.purgeable.size);
1057 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1058 			stats.purged.count, stats.purged.size);
1059 }
1060 #endif
1061 
1062 /* don't call directly!  Use drm_gem_object_put_locked() and friends */
1063 void msm_gem_free_object(struct drm_gem_object *obj)
1064 {
1065 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1066 	struct drm_device *dev = obj->dev;
1067 	struct msm_drm_private *priv = dev->dev_private;
1068 
1069 	mutex_lock(&priv->obj_lock);
1070 	list_del(&msm_obj->node);
1071 	mutex_unlock(&priv->obj_lock);
1072 
1073 	mutex_lock(&priv->mm_lock);
1074 	if (msm_obj->dontneed)
1075 		mark_unpurgeable(msm_obj);
1076 	list_del(&msm_obj->mm_list);
1077 	mutex_unlock(&priv->mm_lock);
1078 
1079 	msm_gem_lock(obj);
1080 
1081 	/* object should not be on active list: */
1082 	GEM_WARN_ON(is_active(msm_obj));
1083 
1084 	put_iova_spaces(obj, true);
1085 
1086 	if (obj->import_attach) {
1087 		GEM_WARN_ON(msm_obj->vaddr);
1088 
1089 		/* Don't drop the pages for imported dmabuf, as they are not
1090 		 * ours, just free the array we allocated:
1091 		 */
1092 		kvfree(msm_obj->pages);
1093 
1094 		put_iova_vmas(obj);
1095 
1096 		/* dma_buf_detach() grabs resv lock, so we need to unlock
1097 		 * prior to drm_prime_gem_destroy
1098 		 */
1099 		msm_gem_unlock(obj);
1100 
1101 		drm_prime_gem_destroy(obj, msm_obj->sgt);
1102 	} else {
1103 		msm_gem_vunmap(obj);
1104 		put_pages(obj);
1105 		put_iova_vmas(obj);
1106 		msm_gem_unlock(obj);
1107 	}
1108 
1109 	drm_gem_object_release(obj);
1110 
1111 	kfree(msm_obj);
1112 }
1113 
1114 /* convenience method to construct a GEM buffer object, and userspace handle */
1115 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1116 		uint32_t size, uint32_t flags, uint32_t *handle,
1117 		char *name)
1118 {
1119 	struct drm_gem_object *obj;
1120 	int ret;
1121 
1122 	obj = msm_gem_new(dev, size, flags);
1123 
1124 	if (IS_ERR(obj))
1125 		return PTR_ERR(obj);
1126 
1127 	if (name)
1128 		msm_gem_object_set_name(obj, "%s", name);
1129 
1130 	ret = drm_gem_handle_create(file, obj, handle);
1131 
1132 	/* drop reference from allocate - handle holds it now */
1133 	drm_gem_object_put(obj);
1134 
1135 	return ret;
1136 }
1137 
1138 static const struct vm_operations_struct vm_ops = {
1139 	.fault = msm_gem_fault,
1140 	.open = drm_gem_vm_open,
1141 	.close = drm_gem_vm_close,
1142 };
1143 
1144 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1145 	.free = msm_gem_free_object,
1146 	.pin = msm_gem_prime_pin,
1147 	.unpin = msm_gem_prime_unpin,
1148 	.get_sg_table = msm_gem_prime_get_sg_table,
1149 	.vmap = msm_gem_prime_vmap,
1150 	.vunmap = msm_gem_prime_vunmap,
1151 	.vm_ops = &vm_ops,
1152 };
1153 
1154 static int msm_gem_new_impl(struct drm_device *dev,
1155 		uint32_t size, uint32_t flags,
1156 		struct drm_gem_object **obj)
1157 {
1158 	struct msm_gem_object *msm_obj;
1159 
1160 	switch (flags & MSM_BO_CACHE_MASK) {
1161 	case MSM_BO_UNCACHED:
1162 	case MSM_BO_CACHED:
1163 	case MSM_BO_WC:
1164 		break;
1165 	default:
1166 		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
1167 				(flags & MSM_BO_CACHE_MASK));
1168 		return -EINVAL;
1169 	}
1170 
1171 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1172 	if (!msm_obj)
1173 		return -ENOMEM;
1174 
1175 	msm_obj->flags = flags;
1176 	msm_obj->madv = MSM_MADV_WILLNEED;
1177 
1178 	INIT_LIST_HEAD(&msm_obj->submit_entry);
1179 	INIT_LIST_HEAD(&msm_obj->vmas);
1180 
1181 	*obj = &msm_obj->base;
1182 	(*obj)->funcs = &msm_gem_object_funcs;
1183 
1184 	return 0;
1185 }
1186 
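/*
 * Allocate a new GEM object.  Backing comes either from the VRAM carveout
 * (no MMU, or scanout/stolen buffers on systems with a carveout
 * configured) or from shmem, in which case the pages are kept pinned and
 * are therefore allocated with GFP_HIGHUSER rather than from the movable
 * zone.
 */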
1187 static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
1188 		uint32_t size, uint32_t flags, bool struct_mutex_locked)
1189 {
1190 	struct msm_drm_private *priv = dev->dev_private;
1191 	struct msm_gem_object *msm_obj;
1192 	struct drm_gem_object *obj = NULL;
1193 	bool use_vram = false;
1194 	int ret;
1195 
1196 	size = PAGE_ALIGN(size);
1197 
1198 	if (!msm_use_mmu(dev))
1199 		use_vram = true;
1200 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1201 		use_vram = true;
1202 
1203 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1204 		return ERR_PTR(-EINVAL);
1205 
1206 	/* Disallow zero sized objects as they make the underlying
1207 	 * infrastructure grumpy
1208 	 */
1209 	if (size == 0)
1210 		return ERR_PTR(-EINVAL);
1211 
1212 	ret = msm_gem_new_impl(dev, size, flags, &obj);
1213 	if (ret)
1214 		goto fail;
1215 
1216 	msm_obj = to_msm_bo(obj);
1217 
1218 	if (use_vram) {
1219 		struct msm_gem_vma *vma;
1220 		struct page **pages;
1221 
1222 		drm_gem_private_object_init(dev, obj, size);
1223 
1224 		msm_gem_lock(obj);
1225 
1226 		vma = add_vma(obj, NULL);
1227 		msm_gem_unlock(obj);
1228 		if (IS_ERR(vma)) {
1229 			ret = PTR_ERR(vma);
1230 			goto fail;
1231 		}
1232 
1233 		to_msm_bo(obj)->vram_node = &vma->node;
1234 
1235 		msm_gem_lock(obj);
1236 		pages = get_pages(obj);
1237 		msm_gem_unlock(obj);
1238 		if (IS_ERR(pages)) {
1239 			ret = PTR_ERR(pages);
1240 			goto fail;
1241 		}
1242 
1243 		vma->iova = physaddr(obj);
1244 	} else {
1245 		ret = drm_gem_object_init(dev, obj, size);
1246 		if (ret)
1247 			goto fail;
1248 		/*
1249 		 * Our buffers are kept pinned, so allocating them from the
1250 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1251 		 * See the comments above new_inode() for why this is required _and_
1252 		 * expected if you're going to pin these pages.
1253 		 */
1254 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1255 	}
1256 
1257 	mutex_lock(&priv->mm_lock);
1258 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1259 	mutex_unlock(&priv->mm_lock);
1260 
1261 	mutex_lock(&priv->obj_lock);
1262 	list_add_tail(&msm_obj->node, &priv->objects);
1263 	mutex_unlock(&priv->obj_lock);
1264 
1265 	return obj;
1266 
1267 fail:
1268 	if (struct_mutex_locked) {
1269 		drm_gem_object_put_locked(obj);
1270 	} else {
1271 		drm_gem_object_put(obj);
1272 	}
1273 	return ERR_PTR(ret);
1274 }
1275 
1276 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
1277 		uint32_t size, uint32_t flags)
1278 {
1279 	return _msm_gem_new(dev, size, flags, true);
1280 }
1281 
1282 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
1283 		uint32_t size, uint32_t flags)
1284 {
1285 	return _msm_gem_new(dev, size, flags, false);
1286 }
1287 
1288 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1289 		struct dma_buf *dmabuf, struct sg_table *sgt)
1290 {
1291 	struct msm_drm_private *priv = dev->dev_private;
1292 	struct msm_gem_object *msm_obj;
1293 	struct drm_gem_object *obj;
1294 	uint32_t size;
1295 	int ret, npages;
1296 
1297 	/* if we don't have IOMMU, don't bother pretending we can import: */
1298 	if (!msm_use_mmu(dev)) {
1299 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1300 		return ERR_PTR(-EINVAL);
1301 	}
1302 
1303 	size = PAGE_ALIGN(dmabuf->size);
1304 
1305 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1306 	if (ret)
1307 		goto fail;
1308 
1309 	drm_gem_private_object_init(dev, obj, size);
1310 
1311 	npages = size / PAGE_SIZE;
1312 
1313 	msm_obj = to_msm_bo(obj);
1314 	msm_gem_lock(obj);
1315 	msm_obj->sgt = sgt;
1316 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1317 	if (!msm_obj->pages) {
1318 		msm_gem_unlock(obj);
1319 		ret = -ENOMEM;
1320 		goto fail;
1321 	}
1322 
1323 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1324 	if (ret) {
1325 		msm_gem_unlock(obj);
1326 		goto fail;
1327 	}
1328 
1329 	msm_gem_unlock(obj);
1330 
1331 	mutex_lock(&priv->mm_lock);
1332 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1333 	mutex_unlock(&priv->mm_lock);
1334 
1335 	mutex_lock(&priv->obj_lock);
1336 	list_add_tail(&msm_obj->node, &priv->objects);
1337 	mutex_unlock(&priv->obj_lock);
1338 
1339 	return obj;
1340 
1341 fail:
1342 	drm_gem_object_put(obj);
1343 	return ERR_PTR(ret);
1344 }
1345 
1346 static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1347 		uint32_t flags, struct msm_gem_address_space *aspace,
1348 		struct drm_gem_object **bo, uint64_t *iova, bool locked)
1349 {
1350 	void *vaddr;
1351 	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
1352 	int ret;
1353 
1354 	if (IS_ERR(obj))
1355 		return ERR_CAST(obj);
1356 
1357 	if (iova) {
1358 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1359 		if (ret)
1360 			goto err;
1361 	}
1362 
1363 	vaddr = msm_gem_get_vaddr(obj);
1364 	if (IS_ERR(vaddr)) {
1365 		msm_gem_unpin_iova(obj, aspace);
1366 		ret = PTR_ERR(vaddr);
1367 		goto err;
1368 	}
1369 
1370 	if (bo)
1371 		*bo = obj;
1372 
1373 	return vaddr;
1374 err:
1375 	if (locked)
1376 		drm_gem_object_put_locked(obj);
1377 	else
1378 		drm_gem_object_put(obj);
1379 
1380 	return ERR_PTR(ret);
1381 
1382 }
1383 
1384 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1385 		uint32_t flags, struct msm_gem_address_space *aspace,
1386 		struct drm_gem_object **bo, uint64_t *iova)
1387 {
1388 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
1389 }
1390 
1391 void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
1392 		uint32_t flags, struct msm_gem_address_space *aspace,
1393 		struct drm_gem_object **bo, uint64_t *iova)
1394 {
1395 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
1396 }
1397 
1398 void msm_gem_kernel_put(struct drm_gem_object *bo,
1399 		struct msm_gem_address_space *aspace, bool locked)
1400 {
1401 	if (IS_ERR_OR_NULL(bo))
1402 		return;
1403 
1404 	msm_gem_put_vaddr(bo);
1405 	msm_gem_unpin_iova(bo, aspace);
1406 
1407 	if (locked)
1408 		drm_gem_object_put_locked(bo);
1409 	else
1410 		drm_gem_object_put(bo);
1411 }
1412 
1413 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1414 {
1415 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1416 	va_list ap;
1417 
1418 	if (!fmt)
1419 		return;
1420 
1421 	va_start(ap, fmt);
1422 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1423 	va_end(ap);
1424 }
1425