xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision 575f0485)
1 /*
2  * Copyright (C) 2013 Red Hat
3  * Author: Rob Clark <robdclark@gmail.com>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #include <linux/spinlock.h>
19 #include <linux/shmem_fs.h>
20 #include <linux/dma-buf.h>
21 #include <linux/pfn_t.h>
22 
23 #include "msm_drv.h"
24 #include "msm_fence.h"
25 #include "msm_gem.h"
26 #include "msm_gpu.h"
27 #include "msm_mmu.h"
28 
29 static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
30 
31 
32 static dma_addr_t physaddr(struct drm_gem_object *obj)
33 {
34 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
35 	struct msm_drm_private *priv = obj->dev->dev_private;
36 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
37 			priv->vram.paddr;
38 }
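/*
 * Worked example (illustrative, made-up values): with 4K pages, a drm_mm
 * node starting at page index 3 and priv->vram.paddr == 0x80000000,
 * physaddr() returns (3 << 12) + 0x80000000 == 0x80003000.  The carveout
 * allocator below works in whole pages, so vram_node->start is a page
 * index rather than a byte offset.
 */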
39 
40 static bool use_pages(struct drm_gem_object *obj)
41 {
42 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
43 	return !msm_obj->vram_node;
44 }
45 
46 /* allocate pages from VRAM carveout, used when no IOMMU: */
47 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
48 {
49 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
50 	struct msm_drm_private *priv = obj->dev->dev_private;
51 	dma_addr_t paddr;
52 	struct page **p;
53 	int ret, i;
54 
55 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
56 	if (!p)
57 		return ERR_PTR(-ENOMEM);
58 
59 	spin_lock(&priv->vram.lock);
60 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
61 	spin_unlock(&priv->vram.lock);
62 	if (ret) {
63 		kvfree(p);
64 		return ERR_PTR(ret);
65 	}
66 
67 	paddr = physaddr(obj);
68 	for (i = 0; i < npages; i++) {
69 		p[i] = phys_to_page(paddr);
70 		paddr += PAGE_SIZE;
71 	}
72 
73 	return p;
74 }
75 
76 static struct page **get_pages(struct drm_gem_object *obj)
77 {
78 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
79 
80 	if (!msm_obj->pages) {
81 		struct drm_device *dev = obj->dev;
82 		struct page **p;
83 		int npages = obj->size >> PAGE_SHIFT;
84 
85 		if (use_pages(obj))
86 			p = drm_gem_get_pages(obj);
87 		else
88 			p = get_pages_vram(obj, npages);
89 
90 		if (IS_ERR(p)) {
91 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
92 					PTR_ERR(p));
93 			return p;
94 		}
95 
96 		msm_obj->pages = p;
97 
98 		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
99 		if (IS_ERR(msm_obj->sgt)) {
100 			void *ptr = ERR_CAST(msm_obj->sgt);
101 
102 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
103 			msm_obj->sgt = NULL;
104 			return ptr;
105 		}
106 
107 		/* For non-cached buffers, ensure the new pages are clean
108 		 * because display controller, GPU, etc. are not coherent:
109 		 */
110 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
111 			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
112 					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
113 	}
114 
115 	return msm_obj->pages;
116 }
117 
118 static void put_pages_vram(struct drm_gem_object *obj)
119 {
120 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
121 	struct msm_drm_private *priv = obj->dev->dev_private;
122 
123 	spin_lock(&priv->vram.lock);
124 	drm_mm_remove_node(msm_obj->vram_node);
125 	spin_unlock(&priv->vram.lock);
126 
127 	kvfree(msm_obj->pages);
128 }
129 
130 static void put_pages(struct drm_gem_object *obj)
131 {
132 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
133 
134 	if (msm_obj->pages) {
135 		if (msm_obj->sgt) {
136 			/* For non-cached buffers, the pages were mapped
137 			 * for device access in get_pages(); unmap them
138 			 * before they are released:
139 			 */
140 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
141 				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
142 					     msm_obj->sgt->nents,
143 					     DMA_BIDIRECTIONAL);
144 
145 			sg_free_table(msm_obj->sgt);
146 			kfree(msm_obj->sgt);
147 		}
148 
149 		if (use_pages(obj))
150 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
151 		else
152 			put_pages_vram(obj);
153 
154 		msm_obj->pages = NULL;
155 	}
156 }
157 
158 struct page **msm_gem_get_pages(struct drm_gem_object *obj)
159 {
160 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
161 	struct page **p;
162 
163 	mutex_lock(&msm_obj->lock);
164 
165 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
166 		mutex_unlock(&msm_obj->lock);
167 		return ERR_PTR(-EBUSY);
168 	}
169 
170 	p = get_pages(obj);
171 	mutex_unlock(&msm_obj->lock);
172 	return p;
173 }
174 
175 void msm_gem_put_pages(struct drm_gem_object *obj)
176 {
177 	/* when we start tracking the pin count, then do something here */
178 }
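/*
 * Illustrative sketch (not an actual caller): code that needs the backing
 * pages pairs the get/put calls, even though msm_gem_put_pages() is
 * currently a no-op, so that pin-count tracking can be added later:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... use pages[0 .. (obj->size >> PAGE_SHIFT) - 1] ...
 *	msm_gem_put_pages(obj);
 */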
179 
180 int msm_gem_mmap_obj(struct drm_gem_object *obj,
181 		struct vm_area_struct *vma)
182 {
183 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
184 
185 	vma->vm_flags &= ~VM_PFNMAP;
186 	vma->vm_flags |= VM_MIXEDMAP;
187 
188 	if (msm_obj->flags & MSM_BO_WC) {
189 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
190 	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
191 		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
192 	} else {
193 		/*
194 		 * Shunt off cached objs to shmem file so they have their own
195 		 * address_space (so unmap_mapping_range does what we want,
196 		 * in particular in the case of mmap'd dmabufs)
197 		 */
198 		fput(vma->vm_file);
199 		get_file(obj->filp);
200 		vma->vm_pgoff = 0;
201 		vma->vm_file  = obj->filp;
202 
203 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
204 	}
205 
206 	return 0;
207 }
208 
209 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
210 {
211 	int ret;
212 
213 	ret = drm_gem_mmap(filp, vma);
214 	if (ret) {
215 		DBG("mmap failed: %d", ret);
216 		return ret;
217 	}
218 
219 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
220 }
221 
222 vm_fault_t msm_gem_fault(struct vm_fault *vmf)
223 {
224 	struct vm_area_struct *vma = vmf->vma;
225 	struct drm_gem_object *obj = vma->vm_private_data;
226 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
227 	struct page **pages;
228 	unsigned long pfn;
229 	pgoff_t pgoff;
230 	int err;
231 	vm_fault_t ret;
232 
233 	/*
234 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
235 	 * a reference on obj. So, we dont need to hold one here.
236 	 */
237 	err = mutex_lock_interruptible(&msm_obj->lock);
238 	if (err) {
239 		ret = VM_FAULT_NOPAGE;
240 		goto out;
241 	}
242 
243 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
244 		mutex_unlock(&msm_obj->lock);
245 		return VM_FAULT_SIGBUS;
246 	}
247 
248 	/* make sure we have pages attached now */
249 	pages = get_pages(obj);
250 	if (IS_ERR(pages)) {
251 		ret = vmf_error(PTR_ERR(pages));
252 		goto out_unlock;
253 	}
254 
255 	/* We don't use vmf->pgoff since that has the fake offset: */
256 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
257 
258 	pfn = page_to_pfn(pages[pgoff]);
259 
260 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
261 			pfn, pfn << PAGE_SHIFT);
262 
263 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
264 out_unlock:
265 	mutex_unlock(&msm_obj->lock);
266 out:
267 	return ret;
268 }
269 
270 /** get mmap offset */
271 static uint64_t mmap_offset(struct drm_gem_object *obj)
272 {
273 	struct drm_device *dev = obj->dev;
274 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
275 	int ret;
276 
277 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
278 
279 	/* Make it mmapable */
280 	ret = drm_gem_create_mmap_offset(obj);
281 
282 	if (ret) {
283 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
284 		return 0;
285 	}
286 
287 	return drm_vma_node_offset_addr(&obj->vma_node);
288 }
289 
290 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
291 {
292 	uint64_t offset;
293 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
294 
295 	mutex_lock(&msm_obj->lock);
296 	offset = mmap_offset(obj);
297 	mutex_unlock(&msm_obj->lock);
298 	return offset;
299 }
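/*
 * Userspace-side sketch (hypothetical): the value returned here is a fake
 * offset into the DRM file, normally handed to userspace via a dumb-buffer
 * or driver ioctl, which then maps the object through the DRM fd:
 *
 *	void *map = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, mmap_offset);
 */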
300 
301 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
302 		struct msm_gem_address_space *aspace)
303 {
304 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
305 	struct msm_gem_vma *vma;
306 
307 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
308 
309 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
310 	if (!vma)
311 		return ERR_PTR(-ENOMEM);
312 
313 	vma->aspace = aspace;
314 
315 	list_add_tail(&vma->list, &msm_obj->vmas);
316 
317 	return vma;
318 }
319 
320 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
321 		struct msm_gem_address_space *aspace)
322 {
323 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
324 	struct msm_gem_vma *vma;
325 
326 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
327 
328 	list_for_each_entry(vma, &msm_obj->vmas, list) {
329 		if (vma->aspace == aspace)
330 			return vma;
331 	}
332 
333 	return NULL;
334 }
335 
336 static void del_vma(struct msm_gem_vma *vma)
337 {
338 	if (!vma)
339 		return;
340 
341 	list_del(&vma->list);
342 	kfree(vma);
343 }
344 
345 /* Called with msm_obj->lock locked */
346 static void
347 put_iova(struct drm_gem_object *obj)
348 {
349 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
350 	struct msm_gem_vma *vma, *tmp;
351 
352 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
353 
354 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
355 		msm_gem_unmap_vma(vma->aspace, vma);
356 		del_vma(vma);
357 	}
358 }
359 
360 static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
361 		struct msm_gem_address_space *aspace, uint64_t *iova)
362 {
363 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
364 	struct msm_gem_vma *vma;
365 	int ret = 0;
366 
367 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
368 
369 	vma = lookup_vma(obj, aspace);
370 
371 	if (!vma) {
372 		vma = add_vma(obj, aspace);
373 		if (IS_ERR(vma))
374 			return PTR_ERR(vma);
375 
376 		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
377 		if (ret) {
378 			del_vma(vma);
379 			return ret;
380 		}
381 	}
382 
383 	*iova = vma->iova;
384 	return 0;
385 }
386 
387 static int msm_gem_pin_iova(struct drm_gem_object *obj,
388 		struct msm_gem_address_space *aspace)
389 {
390 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
391 	struct msm_gem_vma *vma;
392 	struct page **pages;
393 
394 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
395 
396 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
397 		return -EBUSY;
398 
399 	vma = lookup_vma(obj, aspace);
400 	if (WARN_ON(!vma))
401 		return -EINVAL;
402 
403 	pages = get_pages(obj);
404 	if (IS_ERR(pages))
405 		return PTR_ERR(pages);
406 
407 	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
408 			obj->size >> PAGE_SHIFT);
409 }
410 
411 
412 /* get iova, taking a reference.  Should have a matching put */
413 int msm_gem_get_iova(struct drm_gem_object *obj,
414 		struct msm_gem_address_space *aspace, uint64_t *iova)
415 {
416 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
417 	u64 local;
418 	int ret;
419 
420 	mutex_lock(&msm_obj->lock);
421 
422 	ret = msm_gem_get_iova_locked(obj, aspace, &local);
423 
424 	if (!ret)
425 		ret = msm_gem_pin_iova(obj, aspace);
426 
427 	if (!ret)
428 		*iova = local;
429 
430 	mutex_unlock(&msm_obj->lock);
431 	return ret;
432 }
433 
434 /* get iova without taking a reference, used in places where you have
435  * already done a 'msm_gem_get_iova()'.
436  */
437 uint64_t msm_gem_iova(struct drm_gem_object *obj,
438 		struct msm_gem_address_space *aspace)
439 {
440 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
441 	struct msm_gem_vma *vma;
442 
443 	mutex_lock(&msm_obj->lock);
444 	vma = lookup_vma(obj, aspace);
445 	mutex_unlock(&msm_obj->lock);
446 	WARN_ON(!vma);
447 
448 	return vma ? vma->iova : 0;
449 }
450 
451 void msm_gem_put_iova(struct drm_gem_object *obj,
452 		struct msm_gem_address_space *aspace)
453 {
454 	// XXX TODO ..
455 	// NOTE: probably don't need a _locked() version.. we wouldn't
456 	// normally unmap here, but instead just mark that it could be
457 	// unmapped (if the iova refcnt drops to zero), but then later
458 	// if another _get_iova_locked() fails we can start unmapping
459 	// things that are no longer needed..
460 }
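/*
 * Illustrative pairing (sketch, not a real caller): map an object into a
 * GPU address space, use the iova, then drop the reference.  Since
 * msm_gem_put_iova() is still a stub, the mapping actually persists until
 * the object is purged or freed, but callers should pair the calls anyway:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_iova(obj, aspace, &iova);
 *
 *	if (ret)
 *		return ret;
 *	... hand iova to the GPU ...
 *	msm_gem_put_iova(obj, aspace);
 */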
461 
462 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
463 		struct drm_mode_create_dumb *args)
464 {
465 	args->pitch = align_pitch(args->width, args->bpp);
466 	args->size  = PAGE_ALIGN(args->pitch * args->height);
467 	return msm_gem_new_handle(dev, file, args->size,
468 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
469 }
470 
471 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
472 		uint32_t handle, uint64_t *offset)
473 {
474 	struct drm_gem_object *obj;
475 	int ret = 0;
476 
477 	/* GEM does all our handle to object mapping */
478 	obj = drm_gem_object_lookup(file, handle);
479 	if (obj == NULL) {
480 		ret = -ENOENT;
481 		goto fail;
482 	}
483 
484 	*offset = msm_gem_mmap_offset(obj);
485 
486 	drm_gem_object_put_unlocked(obj);
487 
488 fail:
489 	return ret;
490 }
491 
492 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
493 {
494 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
495 	int ret = 0;
496 
497 	mutex_lock(&msm_obj->lock);
498 
499 	if (WARN_ON(msm_obj->madv > madv)) {
500 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
501 			msm_obj->madv, madv);
502 		mutex_unlock(&msm_obj->lock);
503 		return ERR_PTR(-EBUSY);
504 	}
505 
506 	/* increment vmap_count *before* vmap() call, so shrinker can
507 	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
508 	 * This guarantees that we won't try to msm_gem_vunmap() this
509 	 * same object from within the vmap() call (while we already
510 	 * hold msm_obj->lock)
511 	 */
512 	msm_obj->vmap_count++;
513 
514 	if (!msm_obj->vaddr) {
515 		struct page **pages = get_pages(obj);
516 		if (IS_ERR(pages)) {
517 			ret = PTR_ERR(pages);
518 			goto fail;
519 		}
520 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
521 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
522 		if (msm_obj->vaddr == NULL) {
523 			ret = -ENOMEM;
524 			goto fail;
525 		}
526 	}
527 
528 	mutex_unlock(&msm_obj->lock);
529 	return msm_obj->vaddr;
530 
531 fail:
532 	msm_obj->vmap_count--;
533 	mutex_unlock(&msm_obj->lock);
534 	return ERR_PTR(ret);
535 }
536 
537 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
538 {
539 	return get_vaddr(obj, MSM_MADV_WILLNEED);
540 }
541 
542 /*
543  * Don't use this!  It is for the very special case of dumping
544  * submits from GPU hangs or faults, where the bo may already
545  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
546  * active list.
547  */
548 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
549 {
550 	return get_vaddr(obj, __MSM_MADV_PURGED);
551 }
552 
553 void msm_gem_put_vaddr(struct drm_gem_object *obj)
554 {
555 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
556 
557 	mutex_lock(&msm_obj->lock);
558 	WARN_ON(msm_obj->vmap_count < 1);
559 	msm_obj->vmap_count--;
560 	mutex_unlock(&msm_obj->lock);
561 }
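/*
 * CPU-access sketch (illustrative; data/len are placeholders and the
 * caller is assumed to hold a reference on obj):
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	msm_gem_put_vaddr(obj);
 */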
562 
563 /* Update madvise status; returns true if the bo has not been purged,
564  * else false, or -errno on error.
565  */
566 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
567 {
568 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
569 
570 	mutex_lock(&msm_obj->lock);
571 
572 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
573 
574 	if (msm_obj->madv != __MSM_MADV_PURGED)
575 		msm_obj->madv = madv;
576 
577 	madv = msm_obj->madv;
578 
579 	mutex_unlock(&msm_obj->lock);
580 
581 	return (madv != __MSM_MADV_PURGED);
582 }
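/*
 * Sketch of how a caller interprets the return value (hypothetical; in
 * practice this is driven by userspace madvise requests): a negative
 * value is an error, zero means the backing pages were already purged,
 * and non-zero means the buffer still has its contents:
 *
 *	int retained = msm_gem_madvise(obj, MSM_MADV_WILLNEED);
 *
 *	if (retained < 0)
 *		return retained;
 *	if (!retained)
 *		... bo was purged, treat its contents as lost ...
 */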
583 
584 void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
585 {
586 	struct drm_device *dev = obj->dev;
587 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
588 
589 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
590 	WARN_ON(!is_purgeable(msm_obj));
591 	WARN_ON(obj->import_attach);
592 
593 	mutex_lock_nested(&msm_obj->lock, subclass);
594 
595 	put_iova(obj);
596 
597 	msm_gem_vunmap_locked(obj);
598 
599 	put_pages(obj);
600 
601 	msm_obj->madv = __MSM_MADV_PURGED;
602 
603 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
604 	drm_gem_free_mmap_offset(obj);
605 
606 	/* Our goal here is to return as much of the memory as
607 	 * possible back to the system, since we are called from the
608 	 * OOM path.  To do this we must instruct shmfs to drop all
609 	 * of its backing pages, *now*.
610 	 */
611 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
612 
613 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
614 			0, (loff_t)-1);
615 
616 	mutex_unlock(&msm_obj->lock);
617 }
618 
619 static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
620 {
621 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
622 
623 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
624 
625 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
626 		return;
627 
628 	vunmap(msm_obj->vaddr);
629 	msm_obj->vaddr = NULL;
630 }
631 
632 void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
633 {
634 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
635 
636 	mutex_lock_nested(&msm_obj->lock, subclass);
637 	msm_gem_vunmap_locked(obj);
638 	mutex_unlock(&msm_obj->lock);
639 }
640 
641 /* must be called before _move_to_active().. */
642 int msm_gem_sync_object(struct drm_gem_object *obj,
643 		struct msm_fence_context *fctx, bool exclusive)
644 {
645 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
646 	struct reservation_object_list *fobj;
647 	struct dma_fence *fence;
648 	int i, ret;
649 
650 	fobj = reservation_object_get_list(msm_obj->resv);
651 	if (!fobj || (fobj->shared_count == 0)) {
652 		fence = reservation_object_get_excl(msm_obj->resv);
653 		/* don't need to wait on our own fences, since ring is fifo */
654 		if (fence && (fence->context != fctx->context)) {
655 			ret = dma_fence_wait(fence, true);
656 			if (ret)
657 				return ret;
658 		}
659 	}
660 
661 	if (!exclusive || !fobj)
662 		return 0;
663 
664 	for (i = 0; i < fobj->shared_count; i++) {
665 		fence = rcu_dereference_protected(fobj->shared[i],
666 						reservation_object_held(msm_obj->resv));
667 		if (fence->context != fctx->context) {
668 			ret = dma_fence_wait(fence, true);
669 			if (ret)
670 				return ret;
671 		}
672 	}
673 
674 	return 0;
675 }
676 
677 void msm_gem_move_to_active(struct drm_gem_object *obj,
678 		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
679 {
680 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
681 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
682 	msm_obj->gpu = gpu;
683 	if (exclusive)
684 		reservation_object_add_excl_fence(msm_obj->resv, fence);
685 	else
686 		reservation_object_add_shared_fence(msm_obj->resv, fence);
687 	list_del_init(&msm_obj->mm_list);
688 	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
689 }
690 
691 void msm_gem_move_to_inactive(struct drm_gem_object *obj)
692 {
693 	struct drm_device *dev = obj->dev;
694 	struct msm_drm_private *priv = dev->dev_private;
695 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
696 
697 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
698 
699 	msm_obj->gpu = NULL;
700 	list_del_init(&msm_obj->mm_list);
701 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
702 }
703 
704 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
705 {
706 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
707 	bool write = !!(op & MSM_PREP_WRITE);
708 	unsigned long remain =
709 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
710 	long ret;
711 
712 	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
713 						  true,  remain);
714 	if (ret == 0)
715 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
716 	else if (ret < 0)
717 		return ret;
718 
719 	/* TODO cache maintenance */
720 
721 	return 0;
722 }
723 
724 int msm_gem_cpu_fini(struct drm_gem_object *obj)
725 {
726 	/* TODO cache maintenance */
727 	return 0;
728 }
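/*
 * Illustrative prep/fini pairing around CPU access (sketch; the MSM_PREP_*
 * flags come from the msm UAPI and timeout handling is omitted):
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads/writes the buffer ...
 *	msm_gem_cpu_fini(obj);
 */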
729 
730 #ifdef CONFIG_DEBUG_FS
731 static void describe_fence(struct dma_fence *fence, const char *type,
732 		struct seq_file *m)
733 {
734 	if (!dma_fence_is_signaled(fence))
735 		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
736 				fence->ops->get_driver_name(fence),
737 				fence->ops->get_timeline_name(fence),
738 				fence->seqno);
739 }
740 
741 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
742 {
743 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
744 	struct reservation_object *robj = msm_obj->resv;
745 	struct reservation_object_list *fobj;
746 	struct dma_fence *fence;
747 	struct msm_gem_vma *vma;
748 	uint64_t off = drm_vma_node_start(&obj->vma_node);
749 	const char *madv;
750 
751 	mutex_lock(&msm_obj->lock);
752 
753 	switch (msm_obj->madv) {
754 	case __MSM_MADV_PURGED:
755 		madv = " purged";
756 		break;
757 	case MSM_MADV_DONTNEED:
758 		madv = " purgeable";
759 		break;
760 	case MSM_MADV_WILLNEED:
761 	default:
762 		madv = "";
763 		break;
764 	}
765 
766 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
767 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
768 			obj->name, kref_read(&obj->refcount),
769 			off, msm_obj->vaddr);
770 
771 	seq_printf(m, " %08zu%9s\n", obj->size, madv);
772 
773 	if (!list_empty(&msm_obj->vmas)) {
774 
775 		seq_puts(m, "   vmas:");
776 
777 		list_for_each_entry(vma, &msm_obj->vmas, list)
778 			seq_printf(m, " [%s: %08llx,%s]", vma->aspace->name,
779 				vma->iova, vma->mapped ? "mapped" : "unmapped");
780 
781 		seq_puts(m, "\n");
782 	}
783 
784 	rcu_read_lock();
785 	fobj = rcu_dereference(robj->fence);
786 	if (fobj) {
787 		unsigned int i, shared_count = fobj->shared_count;
788 
789 		for (i = 0; i < shared_count; i++) {
790 			fence = rcu_dereference(fobj->shared[i]);
791 			describe_fence(fence, "Shared", m);
792 		}
793 	}
794 
795 	fence = rcu_dereference(robj->fence_excl);
796 	if (fence)
797 		describe_fence(fence, "Exclusive", m);
798 	rcu_read_unlock();
799 
800 	mutex_unlock(&msm_obj->lock);
801 }
802 
803 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
804 {
805 	struct msm_gem_object *msm_obj;
806 	int count = 0;
807 	size_t size = 0;
808 
809 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv\n");
810 	list_for_each_entry(msm_obj, list, mm_list) {
811 		struct drm_gem_object *obj = &msm_obj->base;
812 		seq_puts(m, "   ");
813 		msm_gem_describe(obj, m);
814 		count++;
815 		size += obj->size;
816 	}
817 
818 	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
819 }
820 #endif
821 
822 /* don't call directly!  Use drm_gem_object_put() and friends */
823 void msm_gem_free_object(struct drm_gem_object *obj)
824 {
825 	struct drm_device *dev = obj->dev;
826 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
827 
828 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
829 
830 	/* object should not be on active list: */
831 	WARN_ON(is_active(msm_obj));
832 
833 	list_del(&msm_obj->mm_list);
834 
835 	mutex_lock(&msm_obj->lock);
836 
837 	put_iova(obj);
838 
839 	if (obj->import_attach) {
840 		if (msm_obj->vaddr)
841 			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
842 
843 		/* Don't drop the pages for imported dmabuf, as they are not
844 		 * ours, just free the array we allocated:
845 		 */
846 		if (msm_obj->pages)
847 			kvfree(msm_obj->pages);
848 
849 		drm_prime_gem_destroy(obj, msm_obj->sgt);
850 	} else {
851 		msm_gem_vunmap_locked(obj);
852 		put_pages(obj);
853 	}
854 
855 	if (msm_obj->resv == &msm_obj->_resv)
856 		reservation_object_fini(msm_obj->resv);
857 
858 	drm_gem_object_release(obj);
859 
860 	mutex_unlock(&msm_obj->lock);
861 	kfree(msm_obj);
862 }
863 
864 /* convenience method to construct a GEM buffer object and a userspace handle */
865 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
866 		uint32_t size, uint32_t flags, uint32_t *handle)
867 {
868 	struct drm_gem_object *obj;
869 	int ret;
870 
871 	obj = msm_gem_new(dev, size, flags);
872 
873 	if (IS_ERR(obj))
874 		return PTR_ERR(obj);
875 
876 	ret = drm_gem_handle_create(file, obj, handle);
877 
878 	/* drop reference from allocate - handle holds it now */
879 	drm_gem_object_put_unlocked(obj);
880 
881 	return ret;
882 }
883 
884 static int msm_gem_new_impl(struct drm_device *dev,
885 		uint32_t size, uint32_t flags,
886 		struct reservation_object *resv,
887 		struct drm_gem_object **obj,
888 		bool struct_mutex_locked)
889 {
890 	struct msm_drm_private *priv = dev->dev_private;
891 	struct msm_gem_object *msm_obj;
892 
893 	switch (flags & MSM_BO_CACHE_MASK) {
894 	case MSM_BO_UNCACHED:
895 	case MSM_BO_CACHED:
896 	case MSM_BO_WC:
897 		break;
898 	default:
899 		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
900 				(flags & MSM_BO_CACHE_MASK));
901 		return -EINVAL;
902 	}
903 
904 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
905 	if (!msm_obj)
906 		return -ENOMEM;
907 
908 	mutex_init(&msm_obj->lock);
909 
910 	msm_obj->flags = flags;
911 	msm_obj->madv = MSM_MADV_WILLNEED;
912 
913 	if (resv) {
914 		msm_obj->resv = resv;
915 	} else {
916 		msm_obj->resv = &msm_obj->_resv;
917 		reservation_object_init(msm_obj->resv);
918 	}
919 
920 	INIT_LIST_HEAD(&msm_obj->submit_entry);
921 	INIT_LIST_HEAD(&msm_obj->vmas);
922 
923 	if (struct_mutex_locked) {
924 		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
925 		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
926 	} else {
927 		mutex_lock(&dev->struct_mutex);
928 		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
929 		mutex_unlock(&dev->struct_mutex);
930 	}
931 
932 	*obj = &msm_obj->base;
933 
934 	return 0;
935 }
936 
937 static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
938 		uint32_t size, uint32_t flags, bool struct_mutex_locked)
939 {
940 	struct msm_drm_private *priv = dev->dev_private;
941 	struct drm_gem_object *obj = NULL;
942 	bool use_vram = false;
943 	int ret;
944 
945 	size = PAGE_ALIGN(size);
946 
947 	if (!iommu_present(&platform_bus_type))
948 		use_vram = true;
949 	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
950 		use_vram = true;
951 
952 	if (WARN_ON(use_vram && !priv->vram.size))
953 		return ERR_PTR(-EINVAL);
954 
955 	/* Disallow zero-sized objects as they make the underlying
956 	 * infrastructure grumpy
957 	 */
958 	if (size == 0)
959 		return ERR_PTR(-EINVAL);
960 
961 	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
962 	if (ret)
963 		goto fail;
964 
965 	if (use_vram) {
966 		struct msm_gem_vma *vma;
967 		struct page **pages;
968 		struct msm_gem_object *msm_obj = to_msm_bo(obj);
969 
970 		mutex_lock(&msm_obj->lock);
971 
972 		vma = add_vma(obj, NULL);
973 		mutex_unlock(&msm_obj->lock);
974 		if (IS_ERR(vma)) {
975 			ret = PTR_ERR(vma);
976 			goto fail;
977 		}
978 
979 		to_msm_bo(obj)->vram_node = &vma->node;
980 
981 		drm_gem_private_object_init(dev, obj, size);
982 
983 		pages = get_pages(obj);
984 		if (IS_ERR(pages)) {
985 			ret = PTR_ERR(pages);
986 			goto fail;
987 		}
988 
989 		vma->iova = physaddr(obj);
990 	} else {
991 		ret = drm_gem_object_init(dev, obj, size);
992 		if (ret)
993 			goto fail;
994 	}
995 
996 	return obj;
997 
998 fail:
999 	drm_gem_object_put_unlocked(obj);
1000 	return ERR_PTR(ret);
1001 }
1002 
1003 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
1004 		uint32_t size, uint32_t flags)
1005 {
1006 	return _msm_gem_new(dev, size, flags, true);
1007 }
1008 
1009 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
1010 		uint32_t size, uint32_t flags)
1011 {
1012 	return _msm_gem_new(dev, size, flags, false);
1013 }
1014 
1015 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1016 		struct dma_buf *dmabuf, struct sg_table *sgt)
1017 {
1018 	struct msm_gem_object *msm_obj;
1019 	struct drm_gem_object *obj;
1020 	uint32_t size;
1021 	int ret, npages;
1022 
1023 	/* if we don't have IOMMU, don't bother pretending we can import: */
1024 	if (!iommu_present(&platform_bus_type)) {
1025 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1026 		return ERR_PTR(-EINVAL);
1027 	}
1028 
1029 	size = PAGE_ALIGN(dmabuf->size);
1030 
1031 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
1032 	if (ret)
1033 		goto fail;
1034 
1035 	drm_gem_private_object_init(dev, obj, size);
1036 
1037 	npages = size / PAGE_SIZE;
1038 
1039 	msm_obj = to_msm_bo(obj);
1040 	mutex_lock(&msm_obj->lock);
1041 	msm_obj->sgt = sgt;
1042 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1043 	if (!msm_obj->pages) {
1044 		mutex_unlock(&msm_obj->lock);
1045 		ret = -ENOMEM;
1046 		goto fail;
1047 	}
1048 
1049 	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
1050 	if (ret) {
1051 		mutex_unlock(&msm_obj->lock);
1052 		goto fail;
1053 	}
1054 
1055 	mutex_unlock(&msm_obj->lock);
1056 	return obj;
1057 
1058 fail:
1059 	drm_gem_object_put_unlocked(obj);
1060 	return ERR_PTR(ret);
1061 }
1062 
1063 static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1064 		uint32_t flags, struct msm_gem_address_space *aspace,
1065 		struct drm_gem_object **bo, uint64_t *iova, bool locked)
1066 {
1067 	void *vaddr;
1068 	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
1069 	int ret;
1070 
1071 	if (IS_ERR(obj))
1072 		return ERR_CAST(obj);
1073 
1074 	if (iova) {
1075 		ret = msm_gem_get_iova(obj, aspace, iova);
1076 		if (ret)
1077 			goto err;
1078 	}
1079 
1080 	vaddr = msm_gem_get_vaddr(obj);
1081 	if (IS_ERR(vaddr)) {
1082 		msm_gem_put_iova(obj, aspace);
1083 		ret = PTR_ERR(vaddr);
1084 		goto err;
1085 	}
1086 
1087 	if (bo)
1088 		*bo = obj;
1089 
1090 	return vaddr;
1091 err:
1092 	if (locked)
1093 		drm_gem_object_put(obj);
1094 	else
1095 		drm_gem_object_put_unlocked(obj);
1096 
1097 	return ERR_PTR(ret);
1098 
1099 }
1100 
1101 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1102 		uint32_t flags, struct msm_gem_address_space *aspace,
1103 		struct drm_gem_object **bo, uint64_t *iova)
1104 {
1105 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
1106 }
1107 
1108 void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
1109 		uint32_t flags, struct msm_gem_address_space *aspace,
1110 		struct drm_gem_object **bo, uint64_t *iova)
1111 {
1112 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
1113 }
1114 
1115 void msm_gem_kernel_put(struct drm_gem_object *bo,
1116 		struct msm_gem_address_space *aspace, bool locked)
1117 {
1118 	if (IS_ERR_OR_NULL(bo))
1119 		return;
1120 
1121 	msm_gem_put_vaddr(bo);
1122 	msm_gem_put_iova(bo, aspace);
1123 
1124 	if (locked)
1125 		drm_gem_object_put(bo);
1126 	else
1127 		drm_gem_object_put_unlocked(bo);
1128 }
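/*
 * Kernel-internal usage sketch (illustrative; scratch_bo/scratch_iova are
 * placeholder names): allocate a GPU-visible scratch buffer with a kernel
 * mapping, then tear it down when done:
 *
 *	struct drm_gem_object *scratch_bo;
 *	uint64_t scratch_iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace,
 *				 &scratch_bo, &scratch_iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... fill *ptr, hand scratch_iova to the GPU ...
 *	msm_gem_kernel_put(scratch_bo, aspace, false);
 */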
1129