/* xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision 4b85f7f5) */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}
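
/*
 * Worked example (illustrative values only): with PAGE_SHIFT == 12, a
 * carveout based at vram.paddr == 0x80000000 and an object whose
 * vram_node starts at page 3, this resolves to:
 *
 *	paddr = ((dma_addr_t)3 << 12) + 0x80000000 = 0x80003000
 *
 * i.e. vram_node->start is in units of whole pages, not bytes.
 */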

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			/* don't leave a dangling ERR_PTR behind for
			 * put_pages() or others to trip over:
			 */
			msm_obj->sgt = NULL;
			return ptr;
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			kvfree(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
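
/*
 * Sketch of the expected pairing (illustrative only, not called from this
 * file; assumes a valid struct drm_gem_object *obj):
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... access pages[0 .. (obj->size >> PAGE_SHIFT) - 1] ...
 *	msm_gem_put_pages(obj);   // currently a no-op, kept for symmetry
 */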

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}
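
/*
 * The returned value is a fake offset in the DRM mmap space; userspace
 * hands it back to mmap() on the drm fd.  A hedged userspace-side sketch
 * (error handling omitted; 'fd', 'handle' and 'size' assumed):
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, req.offset);
 */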

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* Should be called under struct_mutex, although it can also be called
 * from atomic context without struct_mutex to acquire an extra iova ref
 * if you know one is already held.
 *
 * That means that when support for unpinning is eventually added, the
 * refcnt counter will need to become an atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;
	return 0;

fail:
	del_vma(vma);

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
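
/*
 * Typical pairing, sketched (illustrative; assumes 'obj' and 'aspace'
 * are valid, and that the caller balances with msm_gem_put_iova()):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_iova(obj, aspace, &iova);
 *
 *	if (ret)
 *		return ret;
 *	... program 'iova' into GPU/display registers ...
 *	msm_gem_put_iova(obj, aspace);   // currently a no-op, see below
 */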

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	mutex_lock(&obj->dev->struct_mutex);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&obj->dev->struct_mutex);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}
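
/*
 * Worked example (illustrative; assumes align_pitch() only rounds the
 * byte pitch up to the hardware requirement): a 1920x1080 XRGB8888 dumb
 * buffer has pitch >= 1920 * 4 = 7680 bytes, so
 *
 *	args->size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (2025 pages)
 *
 * assuming 4K pages and no extra pitch padding.
 */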

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}
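
/*
 * Kernel-side usage sketch for the vmap helpers (illustrative; assumes a
 * valid 'obj' and a small copy into the buffer):
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	msm_gem_put_vaddr(obj);
 *
 * Note the mapping is write-combined, so ordering-sensitive consumers may
 * still need an explicit write barrier before kicking the GPU.
 */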

/* Update madvise status; returns true if the object has not been
 * purged, else false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}
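
/*
 * Illustrative flow: a buffer marked MSM_MADV_DONTNEED becomes a
 * candidate for purging, and must be marked MSM_MADV_WILLNEED again
 * before reuse; a false return then tells the caller the backing pages
 * were purged in between and the contents are gone:
 *
 *	msm_gem_madvise(obj, MSM_MADV_DONTNEED);    // now purgeable
 *	... shrinker may call msm_gem_purge(obj) ...
 *	if (!msm_gem_madvise(obj, MSM_MADV_WILLNEED))
 *		; // contents lost: re-allocate / re-initialize
 */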

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);
	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}
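
/*
 * Sketch of a CPU access section (illustrative): prep waits for pending
 * GPU work to signal (with MSM_PREP_NOSYNC it returns -EBUSY instead of
 * blocking), fini ends the access:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads/writes the buffer ...
 *	msm_gem_cpu_fini(obj);
 */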

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated (kvfree is a no-op
		 * on NULL):
		 */
		kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
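
/*
 * Hedged sketch of an in-kernel caller (error handling elided; 'len' is
 * an assumed length):
 *
 *	uint32_t handle;
 *	int ret = msm_gem_new_handle(dev, file, PAGE_ALIGN(len),
 *				     MSM_BO_WC, &handle);
 *
 * On success the handle owns the sole reference; the object is freed
 * when userspace closes the handle (or the file).
 */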

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		vma = add_vma(obj, NULL);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	/* NULL-init so the fail: path is safe before allocation: */
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
	mutex_lock(&dev->struct_mutex);
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}
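
/*
 * Hedged sketch of where this sits in the PRIME import path (assuming
 * the standard drm_gem_prime_import() flow): the core attaches to and
 * maps the dma-buf to obtain 'sgt', then calls the driver's
 * .gem_prime_import_sg_table hook, which for msm lands here:
 *
 *	attach = dma_buf_attach(dmabuf, dev->dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	obj = msm_gem_import(dev, dmabuf, sgt);
 */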
957