xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision fc2f0756)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <linux/dma-map-ops.h>
8 #include <linux/vmalloc.h>
9 #include <linux/spinlock.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/dma-buf.h>
12 #include <linux/pfn_t.h>
13 
14 #include <drm/drm_prime.h>
15 
16 #include "msm_drv.h"
17 #include "msm_fence.h"
18 #include "msm_gem.h"
19 #include "msm_gpu.h"
20 #include "msm_mmu.h"
21 
22 static void update_lru(struct drm_gem_object *obj);
23 
24 static dma_addr_t physaddr(struct drm_gem_object *obj)
25 {
26 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
27 	struct msm_drm_private *priv = obj->dev->dev_private;
28 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
29 			priv->vram.paddr;
30 }
31 
32 static bool use_pages(struct drm_gem_object *obj)
33 {
34 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
35 	return !msm_obj->vram_node;
36 }
37 
38 /*
39  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
40  * API.  Really GPU cache is out of scope here (handled on cmdstream)
41  * and all we need to do is invalidate newly allocated pages before
42  * mapping to CPU as uncached/writecombine.
43  *
44  * On top of this, we have the added headache that, depending on
45  * display generation, the display's iommu may be wired up to either
46  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
47  * that here we either have dma-direct or iommu ops.
48  *
49  * Let this be a cautionary tale of abstraction gone wrong.
50  */
51 
52 static void sync_for_device(struct msm_gem_object *msm_obj)
53 {
54 	struct device *dev = msm_obj->base.dev->dev;
55 
56 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
57 }
58 
59 static void sync_for_cpu(struct msm_gem_object *msm_obj)
60 {
61 	struct device *dev = msm_obj->base.dev->dev;
62 
63 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
64 }
65 
66 /* allocate pages from VRAM carveout, used when no IOMMU: */
67 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
68 {
69 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
70 	struct msm_drm_private *priv = obj->dev->dev_private;
71 	dma_addr_t paddr;
72 	struct page **p;
73 	int ret, i;
74 
75 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
76 	if (!p)
77 		return ERR_PTR(-ENOMEM);
78 
79 	spin_lock(&priv->vram.lock);
80 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
81 	spin_unlock(&priv->vram.lock);
82 	if (ret) {
83 		kvfree(p);
84 		return ERR_PTR(ret);
85 	}
86 
87 	paddr = physaddr(obj);
88 	for (i = 0; i < npages; i++) {
89 		p[i] = pfn_to_page(__phys_to_pfn(paddr));
90 		paddr += PAGE_SIZE;
91 	}
92 
93 	return p;
94 }
95 
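/*
 * Get the backing pages for the object, allocating them (plus the sg
 * table) on first use.  Called with the object lock held.
 */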
96 static struct page **get_pages(struct drm_gem_object *obj)
97 {
98 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
99 
100 	msm_gem_assert_locked(obj);
101 
102 	if (!msm_obj->pages) {
103 		struct drm_device *dev = obj->dev;
104 		struct page **p;
105 		int npages = obj->size >> PAGE_SHIFT;
106 
107 		if (use_pages(obj))
108 			p = drm_gem_get_pages(obj);
109 		else
110 			p = get_pages_vram(obj, npages);
111 
112 		if (IS_ERR(p)) {
113 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
114 					PTR_ERR(p));
115 			return p;
116 		}
117 
118 		msm_obj->pages = p;
119 
120 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
121 		if (IS_ERR(msm_obj->sgt)) {
122 			void *ptr = ERR_CAST(msm_obj->sgt);
123 
124 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
125 			msm_obj->sgt = NULL;
126 			return ptr;
127 		}
128 
129 		/* For non-cached buffers, ensure the new pages are clean
130 		 * because display controller, GPU, etc. are not coherent:
131 		 */
132 		if (msm_obj->flags & MSM_BO_WC)
133 			sync_for_device(msm_obj);
134 
135 		update_lru(obj);
136 	}
137 
138 	return msm_obj->pages;
139 }
140 
141 static void put_pages_vram(struct drm_gem_object *obj)
142 {
143 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
144 	struct msm_drm_private *priv = obj->dev->dev_private;
145 
146 	spin_lock(&priv->vram.lock);
147 	drm_mm_remove_node(msm_obj->vram_node);
148 	spin_unlock(&priv->vram.lock);
149 
150 	kvfree(msm_obj->pages);
151 }
152 
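/*
 * Release the object's backing pages, sg table and (when used) the
 * VRAM carveout node.
 */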
153 static void put_pages(struct drm_gem_object *obj)
154 {
155 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
156 
157 	if (msm_obj->pages) {
158 		if (msm_obj->sgt) {
159 			/* For non-cached buffers, ensure the new
160 			 * pages are clean because display controller,
161 			 * GPU, etc. are not coherent:
162 			 */
163 			if (msm_obj->flags & MSM_BO_WC)
164 				sync_for_cpu(msm_obj);
165 
166 			sg_free_table(msm_obj->sgt);
167 			kfree(msm_obj->sgt);
168 			msm_obj->sgt = NULL;
169 		}
170 
171 		if (use_pages(obj))
172 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
173 		else
174 			put_pages_vram(obj);
175 
176 		msm_obj->pages = NULL;
177 		update_lru(obj);
178 	}
179 }
180 
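/*
 * Pin the backing pages, failing with -EBUSY if the object is no longer
 * MSM_MADV_WILLNEED.  Called with the object lock held.
 */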
181 static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
182 {
183 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
184 	struct page **p;
185 
186 	msm_gem_assert_locked(obj);
187 
188 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
189 		return ERR_PTR(-EBUSY);
190 	}
191 
192 	p = get_pages(obj);
193 	if (!IS_ERR(p)) {
194 		to_msm_bo(obj)->pin_count++;
195 		update_lru(obj);
196 	}
197 
198 	return p;
199 }
200 
201 struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
202 {
203 	struct page **p;
204 
205 	msm_gem_lock(obj);
206 	p = msm_gem_pin_pages_locked(obj);
207 	msm_gem_unlock(obj);
208 
209 	return p;
210 }
211 
212 void msm_gem_unpin_pages(struct drm_gem_object *obj)
213 {
214 	msm_gem_lock(obj);
215 	msm_gem_unpin_locked(obj);
216 	msm_gem_unlock(obj);
217 }
218 
219 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
220 {
221 	if (msm_obj->flags & MSM_BO_WC)
222 		return pgprot_writecombine(prot);
223 	return prot;
224 }
225 
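/* Page fault handler for userspace mmaps of the object. */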
226 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
227 {
228 	struct vm_area_struct *vma = vmf->vma;
229 	struct drm_gem_object *obj = vma->vm_private_data;
230 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
231 	struct page **pages;
232 	unsigned long pfn;
233 	pgoff_t pgoff;
234 	int err;
235 	vm_fault_t ret;
236 
237 	/*
238 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
239 	 * a reference on obj, so we don't need to hold one here.
240 	 */
241 	err = msm_gem_lock_interruptible(obj);
242 	if (err) {
243 		ret = VM_FAULT_NOPAGE;
244 		goto out;
245 	}
246 
247 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
248 		msm_gem_unlock(obj);
249 		return VM_FAULT_SIGBUS;
250 	}
251 
252 	/* make sure we have pages attached now */
253 	pages = get_pages(obj);
254 	if (IS_ERR(pages)) {
255 		ret = vmf_error(PTR_ERR(pages));
256 		goto out_unlock;
257 	}
258 
259 	/* We don't use vmf->pgoff since that has the fake offset: */
260 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
261 
262 	pfn = page_to_pfn(pages[pgoff]);
263 
264 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
265 			pfn, pfn << PAGE_SHIFT);
266 
267 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
268 
269 out_unlock:
270 	msm_gem_unlock(obj);
271 out:
272 	return ret;
273 }
274 
275 /* get mmap offset */
276 static uint64_t mmap_offset(struct drm_gem_object *obj)
277 {
278 	struct drm_device *dev = obj->dev;
279 	int ret;
280 
281 	msm_gem_assert_locked(obj);
282 
283 	/* Make it mmapable */
284 	ret = drm_gem_create_mmap_offset(obj);
285 
286 	if (ret) {
287 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
288 		return 0;
289 	}
290 
291 	return drm_vma_node_offset_addr(&obj->vma_node);
292 }
293 
294 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
295 {
296 	uint64_t offset;
297 
298 	msm_gem_lock(obj);
299 	offset = mmap_offset(obj);
300 	msm_gem_unlock(obj);
301 	return offset;
302 }
303 
304 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
305 		struct msm_gem_address_space *aspace)
306 {
307 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
308 	struct msm_gem_vma *vma;
309 
310 	msm_gem_assert_locked(obj);
311 
312 	vma = msm_gem_vma_new(aspace);
313 	if (!vma)
314 		return ERR_PTR(-ENOMEM);
315 
316 	list_add_tail(&vma->list, &msm_obj->vmas);
317 
318 	return vma;
319 }
320 
321 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
322 		struct msm_gem_address_space *aspace)
323 {
324 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
325 	struct msm_gem_vma *vma;
326 
327 	msm_gem_assert_locked(obj);
328 
329 	list_for_each_entry(vma, &msm_obj->vmas, list) {
330 		if (vma->aspace == aspace)
331 			return vma;
332 	}
333 
334 	return NULL;
335 }
336 
337 static void del_vma(struct msm_gem_vma *vma)
338 {
339 	if (!vma)
340 		return;
341 
342 	list_del(&vma->list);
343 	kfree(vma);
344 }
345 
346 /*
347  * If close is true, this also closes the VMA (releasing the allocated
348  * iova range) in addition to removing the iommu mapping.  In the eviction
349  * case (!close), we keep the iova allocated, but only remove the iommu
350  * mapping.
351  */
352 static void
353 put_iova_spaces(struct drm_gem_object *obj, bool close)
354 {
355 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
356 	struct msm_gem_vma *vma;
357 
358 	msm_gem_assert_locked(obj);
359 
360 	list_for_each_entry(vma, &msm_obj->vmas, list) {
361 		if (vma->aspace) {
362 			msm_gem_vma_purge(vma);
363 			if (close)
364 				msm_gem_vma_close(vma);
365 		}
366 	}
367 }
368 
369 /* Called with msm_obj locked */
370 static void
371 put_iova_vmas(struct drm_gem_object *obj)
372 {
373 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
374 	struct msm_gem_vma *vma, *tmp;
375 
376 	msm_gem_assert_locked(obj);
377 
378 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
379 		del_vma(vma);
380 	}
381 }
382 
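/*
 * Look up the vma for the given address space, creating it (and
 * allocating an iova in the requested range) if it does not exist yet.
 */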
383 static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
384 		struct msm_gem_address_space *aspace,
385 		u64 range_start, u64 range_end)
386 {
387 	struct msm_gem_vma *vma;
388 
389 	msm_gem_assert_locked(obj);
390 
391 	vma = lookup_vma(obj, aspace);
392 
393 	if (!vma) {
394 		int ret;
395 
396 		vma = add_vma(obj, aspace);
397 		if (IS_ERR(vma))
398 			return vma;
399 
400 		ret = msm_gem_vma_init(vma, obj->size,
401 			range_start, range_end);
402 		if (ret) {
403 			del_vma(vma);
404 			return ERR_PTR(ret);
405 		}
406 	} else {
407 		GEM_WARN_ON(vma->iova < range_start);
408 		GEM_WARN_ON((vma->iova + obj->size) > range_end);
409 	}
410 
411 	return vma;
412 }
413 
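/*
 * Pin the backing pages and map them into the vma, with IOMMU protection
 * bits derived from the buffer flags.
 */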
414 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
415 {
416 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
417 	struct page **pages;
418 	int ret, prot = IOMMU_READ;
419 
420 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
421 		prot |= IOMMU_WRITE;
422 
423 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
424 		prot |= IOMMU_PRIV;
425 
426 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
427 		prot |= IOMMU_CACHE;
428 
429 	msm_gem_assert_locked(obj);
430 
431 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
432 		return -EBUSY;
433 
434 	pages = msm_gem_pin_pages_locked(obj);
435 	if (IS_ERR(pages))
436 		return PTR_ERR(pages);
437 
438 	ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
439 	if (ret)
440 		msm_gem_unpin_locked(obj);
441 
442 	return ret;
443 }
444 
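/* Drop a pin reference and update the object's LRU placement. */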
445 void msm_gem_unpin_locked(struct drm_gem_object *obj)
446 {
447 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
448 
449 	msm_gem_assert_locked(obj);
450 
451 	msm_obj->pin_count--;
452 	GEM_WARN_ON(msm_obj->pin_count < 0);
453 
454 	update_lru(obj);
455 }
456 
457 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
458 					   struct msm_gem_address_space *aspace)
459 {
460 	return get_vma_locked(obj, aspace, 0, U64_MAX);
461 }
462 
463 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
464 		struct msm_gem_address_space *aspace, uint64_t *iova,
465 		u64 range_start, u64 range_end)
466 {
467 	struct msm_gem_vma *vma;
468 	int ret;
469 
470 	msm_gem_assert_locked(obj);
471 
472 	vma = get_vma_locked(obj, aspace, range_start, range_end);
473 	if (IS_ERR(vma))
474 		return PTR_ERR(vma);
475 
476 	ret = msm_gem_pin_vma_locked(obj, vma);
477 	if (!ret)
478 		*iova = vma->iova;
479 
480 	return ret;
481 }
482 
483 /*
484  * Get the iova and pin it.  Should have a matching put.
485  * Limits the iova to the specified range (addresses, not pages).
486  */
487 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
488 		struct msm_gem_address_space *aspace, uint64_t *iova,
489 		u64 range_start, u64 range_end)
490 {
491 	int ret;
492 
493 	msm_gem_lock(obj);
494 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
495 	msm_gem_unlock(obj);
496 
497 	return ret;
498 }
499 
500 /* get iova and pin it. Should have a matching put */
501 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
502 		struct msm_gem_address_space *aspace, uint64_t *iova)
503 {
504 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
505 }
506 
507 /*
508  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
509  * valid for the life of the object
510  */
511 int msm_gem_get_iova(struct drm_gem_object *obj,
512 		struct msm_gem_address_space *aspace, uint64_t *iova)
513 {
514 	struct msm_gem_vma *vma;
515 	int ret = 0;
516 
517 	msm_gem_lock(obj);
518 	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
519 	if (IS_ERR(vma)) {
520 		ret = PTR_ERR(vma);
521 	} else {
522 		*iova = vma->iova;
523 	}
524 	msm_gem_unlock(obj);
525 
526 	return ret;
527 }
528 
529 static int clear_iova(struct drm_gem_object *obj,
530 		      struct msm_gem_address_space *aspace)
531 {
532 	struct msm_gem_vma *vma = lookup_vma(obj, aspace);
533 
534 	if (!vma)
535 		return 0;
536 
537 	if (msm_gem_vma_inuse(vma))
538 		return -EBUSY;
539 
540 	msm_gem_vma_purge(vma);
541 	msm_gem_vma_close(vma);
542 	del_vma(vma);
543 
544 	return 0;
545 }
546 
547 /*
548  * Get the requested iova but don't pin it.  Fails if the requested iova is
549  * not available.  Doesn't need a put because iovas are currently valid for
550  * the life of the object.
551  *
552  * Setting an iova of zero will clear the vma.
553  */
554 int msm_gem_set_iova(struct drm_gem_object *obj,
555 		     struct msm_gem_address_space *aspace, uint64_t iova)
556 {
557 	int ret = 0;
558 
559 	msm_gem_lock(obj);
560 	if (!iova) {
561 		ret = clear_iova(obj, aspace);
562 	} else {
563 		struct msm_gem_vma *vma;
564 		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
565 		if (IS_ERR(vma)) {
566 			ret = PTR_ERR(vma);
567 		} else if (GEM_WARN_ON(vma->iova != iova)) {
568 			clear_iova(obj, aspace);
569 			ret = -EBUSY;
570 		}
571 	}
572 	msm_gem_unlock(obj);
573 
574 	return ret;
575 }
576 
577 /*
578  * Unpin an iova by updating the reference counts. The memory isn't actually
579  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
580  * to get rid of it.
581  */
582 void msm_gem_unpin_iova(struct drm_gem_object *obj,
583 		struct msm_gem_address_space *aspace)
584 {
585 	struct msm_gem_vma *vma;
586 
587 	msm_gem_lock(obj);
588 	vma = lookup_vma(obj, aspace);
589 	if (!GEM_WARN_ON(!vma)) {
590 		msm_gem_vma_unpin(vma);
591 		msm_gem_unpin_locked(obj);
592 	}
593 	msm_gem_unlock(obj);
594 }
595 
596 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
597 		struct drm_mode_create_dumb *args)
598 {
599 	args->pitch = align_pitch(args->width, args->bpp);
600 	args->size  = PAGE_ALIGN(args->pitch * args->height);
601 	return msm_gem_new_handle(dev, file, args->size,
602 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
603 }
604 
605 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
606 		uint32_t handle, uint64_t *offset)
607 {
608 	struct drm_gem_object *obj;
609 	int ret = 0;
610 
611 	/* GEM does all our handle to object mapping */
612 	obj = drm_gem_object_lookup(file, handle);
613 	if (obj == NULL) {
614 		ret = -ENOENT;
615 		goto fail;
616 	}
617 
618 	*offset = msm_gem_mmap_offset(obj);
619 
620 	drm_gem_object_put(obj);
621 
622 fail:
623 	return ret;
624 }
625 
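/*
 * Return a kernel mapping of the object, creating it on first use.
 * Fails for imported dma-bufs and for objects madvised beyond @madv.
 */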
626 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
627 {
628 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
629 	int ret = 0;
630 
631 	msm_gem_assert_locked(obj);
632 
633 	if (obj->import_attach)
634 		return ERR_PTR(-ENODEV);
635 
636 	if (GEM_WARN_ON(msm_obj->madv > madv)) {
637 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
638 			msm_obj->madv, madv);
639 		return ERR_PTR(-EBUSY);
640 	}
641 
642 	/* increment vmap_count *before* vmap() call, so shrinker can
643 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
644 	 * This guarantees that we won't try to msm_gem_vunmap() this
645 	 * same object from within the vmap() call (while we already
646 	 * hold msm_obj lock)
647 	 */
648 	msm_obj->vmap_count++;
649 
650 	if (!msm_obj->vaddr) {
651 		struct page **pages = get_pages(obj);
652 		if (IS_ERR(pages)) {
653 			ret = PTR_ERR(pages);
654 			goto fail;
655 		}
656 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
657 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
658 		if (msm_obj->vaddr == NULL) {
659 			ret = -ENOMEM;
660 			goto fail;
661 		}
662 
663 		update_lru(obj);
664 	}
665 
666 	return msm_obj->vaddr;
667 
668 fail:
669 	msm_obj->vmap_count--;
670 	return ERR_PTR(ret);
671 }
672 
673 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
674 {
675 	return get_vaddr(obj, MSM_MADV_WILLNEED);
676 }
677 
678 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
679 {
680 	void *ret;
681 
682 	msm_gem_lock(obj);
683 	ret = msm_gem_get_vaddr_locked(obj);
684 	msm_gem_unlock(obj);
685 
686 	return ret;
687 }
688 
689 /*
690  * Don't use this!  It is for the very special case of dumping
691  * submits from GPU hangs or faults, where the bo may already
692  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
693  * active list.
694  */
695 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
696 {
697 	return get_vaddr(obj, __MSM_MADV_PURGED);
698 }
699 
700 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
701 {
702 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
703 
704 	msm_gem_assert_locked(obj);
705 	GEM_WARN_ON(msm_obj->vmap_count < 1);
706 
707 	msm_obj->vmap_count--;
708 }
709 
710 void msm_gem_put_vaddr(struct drm_gem_object *obj)
711 {
712 	msm_gem_lock(obj);
713 	msm_gem_put_vaddr_locked(obj);
714 	msm_gem_unlock(obj);
715 }
716 
717 /* Update madvise status, returns true if not purged, else
718  * false.
719  */
720 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
721 {
722 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
723 
724 	msm_gem_lock(obj);
725 
726 	if (msm_obj->madv != __MSM_MADV_PURGED)
727 		msm_obj->madv = madv;
728 
729 	madv = msm_obj->madv;
730 
731 	/* If the obj is inactive, we might need to move it
732 	 * between LRU lists:
733 	 */
734 	update_lru(obj);
735 
736 	msm_gem_unlock(obj);
737 
738 	return (madv != __MSM_MADV_PURGED);
739 }
740 
741 void msm_gem_purge(struct drm_gem_object *obj)
742 {
743 	struct drm_device *dev = obj->dev;
744 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
745 
746 	msm_gem_assert_locked(obj);
747 	GEM_WARN_ON(!is_purgeable(msm_obj));
748 
749 	/* Get rid of any iommu mapping(s): */
750 	put_iova_spaces(obj, true);
751 
752 	msm_gem_vunmap(obj);
753 
754 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
755 
756 	put_pages(obj);
757 
758 	put_iova_vmas(obj);
759 
760 	msm_obj->madv = __MSM_MADV_PURGED;
761 
762 	drm_gem_free_mmap_offset(obj);
763 
764 	/* Our goal here is to return as much of the memory back
765 	 * to the system as possible, since we are called from OOM.
766 	 * To do this we must instruct the shmfs to drop all of its
767 	 * backing pages, *now*.
768 	 */
769 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
770 
771 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
772 			0, (loff_t)-1);
773 }
774 
775 /*
776  * Unpin the backing pages and make them available to be swapped out.
777  */
778 void msm_gem_evict(struct drm_gem_object *obj)
779 {
780 	struct drm_device *dev = obj->dev;
781 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
782 
783 	msm_gem_assert_locked(obj);
784 	GEM_WARN_ON(is_unevictable(msm_obj));
785 
786 	/* Get rid of any iommu mapping(s): */
787 	put_iova_spaces(obj, false);
788 
789 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
790 
791 	put_pages(obj);
792 }
793 
794 void msm_gem_vunmap(struct drm_gem_object *obj)
795 {
796 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
797 
798 	msm_gem_assert_locked(obj);
799 
800 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
801 		return;
802 
803 	vunmap(msm_obj->vaddr);
804 	msm_obj->vaddr = NULL;
805 }
806 
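/*
 * Move the object to the LRU list matching its current state:
 * unbacked, pinned/vmap'd, willneed or dontneed.
 */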
807 static void update_lru(struct drm_gem_object *obj)
808 {
809 	struct msm_drm_private *priv = obj->dev->dev_private;
810 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
811 
812 	msm_gem_assert_locked(&msm_obj->base);
813 
814 	if (!msm_obj->pages) {
815 		GEM_WARN_ON(msm_obj->pin_count);
816 		GEM_WARN_ON(msm_obj->vmap_count);
817 
818 		drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
819 	} else if (msm_obj->pin_count || msm_obj->vmap_count) {
820 		drm_gem_lru_move_tail(&priv->lru.pinned, obj);
821 	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
822 		drm_gem_lru_move_tail(&priv->lru.willneed, obj);
823 	} else {
824 		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
825 
826 		drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
827 	}
828 }
829 
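/* Returns true if the object is pinned or still has unsignaled fences. */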
830 bool msm_gem_active(struct drm_gem_object *obj)
831 {
832 	msm_gem_assert_locked(obj);
833 
834 	if (to_msm_bo(obj)->pin_count)
835 		return true;
836 
837 	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
838 }
839 
840 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
841 {
842 	bool write = !!(op & MSM_PREP_WRITE);
843 	unsigned long remain =
844 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
845 	long ret;
846 
847 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
848 				    true,  remain);
849 	if (ret == 0)
850 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
851 	else if (ret < 0)
852 		return ret;
853 
854 	/* TODO cache maintenance */
855 
856 	return 0;
857 }
858 
859 int msm_gem_cpu_fini(struct drm_gem_object *obj)
860 {
861 	/* TODO cache maintenance */
862 	return 0;
863 }
864 
865 #ifdef CONFIG_DEBUG_FS
866 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
867 		struct msm_gem_stats *stats)
868 {
869 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
870 	struct dma_resv *robj = obj->resv;
871 	struct msm_gem_vma *vma;
872 	uint64_t off = drm_vma_node_start(&obj->vma_node);
873 	const char *madv;
874 
875 	msm_gem_lock(obj);
876 
877 	stats->all.count++;
878 	stats->all.size += obj->size;
879 
880 	if (msm_gem_active(obj)) {
881 		stats->active.count++;
882 		stats->active.size += obj->size;
883 	}
884 
885 	if (msm_obj->pages) {
886 		stats->resident.count++;
887 		stats->resident.size += obj->size;
888 	}
889 
890 	switch (msm_obj->madv) {
891 	case __MSM_MADV_PURGED:
892 		stats->purged.count++;
893 		stats->purged.size += obj->size;
894 		madv = " purged";
895 		break;
896 	case MSM_MADV_DONTNEED:
897 		stats->purgeable.count++;
898 		stats->purgeable.size += obj->size;
899 		madv = " purgeable";
900 		break;
901 	case MSM_MADV_WILLNEED:
902 	default:
903 		madv = "";
904 		break;
905 	}
906 
907 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
908 			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
909 			obj->name, kref_read(&obj->refcount),
910 			off, msm_obj->vaddr);
911 
912 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
913 
914 	if (!list_empty(&msm_obj->vmas)) {
915 
916 		seq_puts(m, "      vmas:");
917 
918 		list_for_each_entry(vma, &msm_obj->vmas, list) {
919 			const char *name, *comm;
920 			if (vma->aspace) {
921 				struct msm_gem_address_space *aspace = vma->aspace;
922 				struct task_struct *task =
923 					get_pid_task(aspace->pid, PIDTYPE_PID);
924 				if (task) {
925 					comm = kstrdup(task->comm, GFP_KERNEL);
926 					put_task_struct(task);
927 				} else {
928 					comm = NULL;
929 				}
930 				name = aspace->name;
931 			} else {
932 				name = comm = NULL;
933 			}
934 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
935 				name, comm ? ":" : "", comm ? comm : "",
936 				vma->aspace, vma->iova,
937 				vma->mapped ? "mapped" : "unmapped",
938 				msm_gem_vma_inuse(vma));
939 			kfree(comm);
940 		}
941 
942 		seq_puts(m, "\n");
943 	}
944 
945 	dma_resv_describe(robj, m);
946 	msm_gem_unlock(obj);
947 }
948 
949 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
950 {
951 	struct msm_gem_stats stats = {};
952 	struct msm_gem_object *msm_obj;
953 
954 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
955 	list_for_each_entry(msm_obj, list, node) {
956 		struct drm_gem_object *obj = &msm_obj->base;
957 		seq_puts(m, "   ");
958 		msm_gem_describe(obj, m, &stats);
959 	}
960 
961 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
962 			stats.all.count, stats.all.size);
963 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
964 			stats.active.count, stats.active.size);
965 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
966 			stats.resident.count, stats.resident.size);
967 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
968 			stats.purgeable.count, stats.purgeable.size);
969 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
970 			stats.purged.count, stats.purged.size);
971 }
972 #endif
973 
974 /* don't call directly!  Use drm_gem_object_put() */
975 static void msm_gem_free_object(struct drm_gem_object *obj)
976 {
977 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
978 	struct drm_device *dev = obj->dev;
979 	struct msm_drm_private *priv = dev->dev_private;
980 
981 	mutex_lock(&priv->obj_lock);
982 	list_del(&msm_obj->node);
983 	mutex_unlock(&priv->obj_lock);
984 
985 	put_iova_spaces(obj, true);
986 
987 	if (obj->import_attach) {
988 		GEM_WARN_ON(msm_obj->vaddr);
989 
990 		/* Don't drop the pages for imported dmabuf, as they are not
991 		 * ours, just free the array we allocated:
992 		 */
993 		kvfree(msm_obj->pages);
994 
995 		put_iova_vmas(obj);
996 
997 		drm_prime_gem_destroy(obj, msm_obj->sgt);
998 	} else {
999 		msm_gem_vunmap(obj);
1000 		put_pages(obj);
1001 		put_iova_vmas(obj);
1002 	}
1003 
1004 	drm_gem_object_release(obj);
1005 
1006 	kfree(msm_obj);
1007 }
1008 
1009 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1010 {
1011 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1012 
1013 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1014 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1015 
1016 	return 0;
1017 }
1018 
1019 /* convenience method to construct a GEM buffer object and userspace handle */
1020 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1021 		uint32_t size, uint32_t flags, uint32_t *handle,
1022 		char *name)
1023 {
1024 	struct drm_gem_object *obj;
1025 	int ret;
1026 
1027 	obj = msm_gem_new(dev, size, flags);
1028 
1029 	if (IS_ERR(obj))
1030 		return PTR_ERR(obj);
1031 
1032 	if (name)
1033 		msm_gem_object_set_name(obj, "%s", name);
1034 
1035 	ret = drm_gem_handle_create(file, obj, handle);
1036 
1037 	/* drop reference from allocate - handle holds it now */
1038 	drm_gem_object_put(obj);
1039 
1040 	return ret;
1041 }
1042 
1043 static const struct vm_operations_struct vm_ops = {
1044 	.fault = msm_gem_fault,
1045 	.open = drm_gem_vm_open,
1046 	.close = drm_gem_vm_close,
1047 };
1048 
1049 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1050 	.free = msm_gem_free_object,
1051 	.pin = msm_gem_prime_pin,
1052 	.unpin = msm_gem_prime_unpin,
1053 	.get_sg_table = msm_gem_prime_get_sg_table,
1054 	.vmap = msm_gem_prime_vmap,
1055 	.vunmap = msm_gem_prime_vunmap,
1056 	.mmap = msm_gem_object_mmap,
1057 	.vm_ops = &vm_ops,
1058 };
1059 
1060 static int msm_gem_new_impl(struct drm_device *dev,
1061 		uint32_t size, uint32_t flags,
1062 		struct drm_gem_object **obj)
1063 {
1064 	struct msm_drm_private *priv = dev->dev_private;
1065 	struct msm_gem_object *msm_obj;
1066 
1067 	switch (flags & MSM_BO_CACHE_MASK) {
1068 	case MSM_BO_CACHED:
1069 	case MSM_BO_WC:
1070 		break;
1071 	case MSM_BO_CACHED_COHERENT:
1072 		if (priv->has_cached_coherent)
1073 			break;
1074 		fallthrough;
1075 	default:
1076 		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1077 				(flags & MSM_BO_CACHE_MASK));
1078 		return -EINVAL;
1079 	}
1080 
1081 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1082 	if (!msm_obj)
1083 		return -ENOMEM;
1084 
1085 	msm_obj->flags = flags;
1086 	msm_obj->madv = MSM_MADV_WILLNEED;
1087 
1088 	INIT_LIST_HEAD(&msm_obj->node);
1089 	INIT_LIST_HEAD(&msm_obj->vmas);
1090 
1091 	*obj = &msm_obj->base;
1092 	(*obj)->funcs = &msm_gem_object_funcs;
1093 
1094 	return 0;
1095 }
1096 
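/*
 * Allocate a new GEM object, backed either by shmem pages or, when the
 * VRAM carveout is in use, by carveout memory.
 */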
1097 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1098 {
1099 	struct msm_drm_private *priv = dev->dev_private;
1100 	struct msm_gem_object *msm_obj;
1101 	struct drm_gem_object *obj = NULL;
1102 	bool use_vram = false;
1103 	int ret;
1104 
1105 	size = PAGE_ALIGN(size);
1106 
1107 	if (!msm_use_mmu(dev))
1108 		use_vram = true;
1109 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1110 		use_vram = true;
1111 
1112 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1113 		return ERR_PTR(-EINVAL);
1114 
1115 	/* Disallow zero sized objects as they make the underlying
1116 	 * infrastructure grumpy
1117 	 */
1118 	if (size == 0)
1119 		return ERR_PTR(-EINVAL);
1120 
1121 	ret = msm_gem_new_impl(dev, size, flags, &obj);
1122 	if (ret)
1123 		return ERR_PTR(ret);
1124 
1125 	msm_obj = to_msm_bo(obj);
1126 
1127 	if (use_vram) {
1128 		struct msm_gem_vma *vma;
1129 		struct page **pages;
1130 
1131 		drm_gem_private_object_init(dev, obj, size);
1132 
1133 		msm_gem_lock(obj);
1134 
1135 		vma = add_vma(obj, NULL);
1136 		msm_gem_unlock(obj);
1137 		if (IS_ERR(vma)) {
1138 			ret = PTR_ERR(vma);
1139 			goto fail;
1140 		}
1141 
1142 		to_msm_bo(obj)->vram_node = &vma->node;
1143 
1144 		msm_gem_lock(obj);
1145 		pages = get_pages(obj);
1146 		msm_gem_unlock(obj);
1147 		if (IS_ERR(pages)) {
1148 			ret = PTR_ERR(pages);
1149 			goto fail;
1150 		}
1151 
1152 		vma->iova = physaddr(obj);
1153 	} else {
1154 		ret = drm_gem_object_init(dev, obj, size);
1155 		if (ret)
1156 			goto fail;
1157 		/*
1158 		 * Our buffers are kept pinned, so allocating them from the
1159 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1160 		 * See comments above new_inode() for why this is required _and_
1161 		 * expected if you're going to pin these pages.
1162 		 */
1163 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1164 	}
1165 
1166 	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1167 
1168 	mutex_lock(&priv->obj_lock);
1169 	list_add_tail(&msm_obj->node, &priv->objects);
1170 	mutex_unlock(&priv->obj_lock);
1171 
1172 	return obj;
1173 
1174 fail:
1175 	drm_gem_object_put(obj);
1176 	return ERR_PTR(ret);
1177 }
1178 
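/*
 * Import a dma-buf: wrap the sg table provided by the exporter in a GEM
 * object, without allocating new backing pages.
 */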
1179 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1180 		struct dma_buf *dmabuf, struct sg_table *sgt)
1181 {
1182 	struct msm_drm_private *priv = dev->dev_private;
1183 	struct msm_gem_object *msm_obj;
1184 	struct drm_gem_object *obj;
1185 	uint32_t size;
1186 	int ret, npages;
1187 
1188 	/* if we don't have IOMMU, don't bother pretending we can import: */
1189 	if (!msm_use_mmu(dev)) {
1190 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1191 		return ERR_PTR(-EINVAL);
1192 	}
1193 
1194 	size = PAGE_ALIGN(dmabuf->size);
1195 
1196 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1197 	if (ret)
1198 		return ERR_PTR(ret);
1199 
1200 	drm_gem_private_object_init(dev, obj, size);
1201 
1202 	npages = size / PAGE_SIZE;
1203 
1204 	msm_obj = to_msm_bo(obj);
1205 	msm_gem_lock(obj);
1206 	msm_obj->sgt = sgt;
1207 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1208 	if (!msm_obj->pages) {
1209 		msm_gem_unlock(obj);
1210 		ret = -ENOMEM;
1211 		goto fail;
1212 	}
1213 
1214 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1215 	if (ret) {
1216 		msm_gem_unlock(obj);
1217 		goto fail;
1218 	}
1219 
1220 	msm_gem_unlock(obj);
1221 
1222 	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1223 
1224 	mutex_lock(&priv->obj_lock);
1225 	list_add_tail(&msm_obj->node, &priv->objects);
1226 	mutex_unlock(&priv->obj_lock);
1227 
1228 	return obj;
1229 
1230 fail:
1231 	drm_gem_object_put(obj);
1232 	return ERR_PTR(ret);
1233 }
1234 
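/*
 * Convenience helper for kernel-internal buffers: allocate a GEM object,
 * optionally pin it at an iova in @aspace, and return a kernel mapping.
 */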
1235 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1236 		uint32_t flags, struct msm_gem_address_space *aspace,
1237 		struct drm_gem_object **bo, uint64_t *iova)
1238 {
1239 	void *vaddr;
1240 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1241 	int ret;
1242 
1243 	if (IS_ERR(obj))
1244 		return ERR_CAST(obj);
1245 
1246 	if (iova) {
1247 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1248 		if (ret)
1249 			goto err;
1250 	}
1251 
1252 	vaddr = msm_gem_get_vaddr(obj);
1253 	if (IS_ERR(vaddr)) {
1254 		msm_gem_unpin_iova(obj, aspace);
1255 		ret = PTR_ERR(vaddr);
1256 		goto err;
1257 	}
1258 
1259 	if (bo)
1260 		*bo = obj;
1261 
1262 	return vaddr;
1263 err:
1264 	drm_gem_object_put(obj);
1265 
1266 	return ERR_PTR(ret);
1267 
1268 }
1269 
1270 void msm_gem_kernel_put(struct drm_gem_object *bo,
1271 		struct msm_gem_address_space *aspace)
1272 {
1273 	if (IS_ERR_OR_NULL(bo))
1274 		return;
1275 
1276 	msm_gem_put_vaddr(bo);
1277 	msm_gem_unpin_iova(bo, aspace);
1278 	drm_gem_object_put(bo);
1279 }
1280 
1281 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1282 {
1283 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1284 	va_list ap;
1285 
1286 	if (!fmt)
1287 		return;
1288 
1289 	va_start(ap, fmt);
1290 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1291 	va_end(ap);
1292 }
1293