/* xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision a3115621) */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}
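
/*
 * Note on the carveout math above: an object's physical address is its
 * drm_mm node start (in pages) shifted up to bytes, plus the carveout
 * base. With hypothetical values, a node starting at page 0x10 and a
 * vram.paddr of 0x80000000 gives (0x10 << 12) + 0x80000000 = 0x80010000
 * on a 4K-page system.
 */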

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, unmap here to balance the
			 * dma_map_sg() done in get_pages() when the pages
			 * were attached:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
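
/*
 * Caller pairing sketch (hypothetical, for illustration only):
 *
 *	struct page **p = msm_gem_get_pages(obj);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	... access p[0] .. p[(obj->size >> PAGE_SHIFT) - 1] ...
 *	msm_gem_put_pages(obj);
 */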

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
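
/*
 * Note: no pages are inserted at mmap() time; msm_gem_fault() below
 * fills the mapping on demand, one page per fault, via
 * vmf_insert_mixed().
 */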

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;
	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}
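
/*
 * Userspace usage sketch (hypothetical): the value returned by
 * msm_gem_mmap_offset() is a fake offset into the DRM device node,
 * not an offset within the buffer:
 *
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, offset);
 */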

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_purge_vma(vma->aspace, vma);
		msm_gem_close_vma(vma->aspace, vma);
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
			obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}
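
/*
 * Pin/unpin pairing sketch (hypothetical caller):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... hand iova to the GPU or display ...
 *	msm_gem_unpin_iova(obj, aspace);
 */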

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}
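
/*
 * Dumb-buffer flow sketch (hypothetical userspace sequence): allocate
 * with DRM_IOCTL_MODE_CREATE_DUMB (msm_gem_dumb_create() above), look
 * up the fake mmap offset with DRM_IOCTL_MODE_MAP_DUMB
 * (msm_gem_dumb_map_offset()), then mmap() the DRM fd at that offset.
 */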

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
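
/*
 * Kernel mapping pairing sketch (hypothetical):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */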

/* Update madvise status; returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
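
/*
 * Example (hypothetical): mark an idle buffer purgeable, and detect a
 * purge when taking it back:
 *
 *	msm_gem_madvise(obj, MSM_MADV_DONTNEED);   // shrinker may now purge
 *	...
 *	if (!msm_gem_madvise(obj, MSM_MADV_WILLNEED))
 *		; // backing pages were purged; contents must be regenerated
 */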

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
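
/*
 * CPU access pairing sketch (hypothetical): fence-wait before touching
 * the buffer from the CPU:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads/writes ...
 *	msm_gem_cpu_fini(obj);
 */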

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
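
/*
 * Lifecycle sketch for driver-internal buffers (hypothetical):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace,
 *				       &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... use ptr for CPU access, iova for GPU access ...
 *	msm_gem_kernel_put(bo, aspace, false);
 */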

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
1173