xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision 4a02a376)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <linux/dma-map-ops.h>
8 #include <linux/vmalloc.h>
9 #include <linux/spinlock.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/dma-buf.h>
12 #include <linux/pfn_t.h>
13 
14 #include <drm/drm_prime.h>
15 
16 #include "msm_drv.h"
17 #include "msm_fence.h"
18 #include "msm_gem.h"
19 #include "msm_gpu.h"
20 #include "msm_mmu.h"
21 
22 static dma_addr_t physaddr(struct drm_gem_object *obj)
23 {
24 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
25 	struct msm_drm_private *priv = obj->dev->dev_private;
26 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
27 			priv->vram.paddr;
28 }
29 
30 static bool use_pages(struct drm_gem_object *obj)
31 {
32 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
33 	return !msm_obj->vram_node;
34 }
35 
36 /*
37  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
38  * API.  Really GPU cache is out of scope here (handled on cmdstream)
39  * and all we need to do is invalidate newly allocated pages before
40  * mapping to CPU as uncached/writecombine.
41  *
42  * On top of this, we have the added headache that, depending on
43  * display generation, the display's iommu may be wired up to either
44  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
45  * that here we either have dma-direct or iommu ops.
46  *
47  * Let this be a cautionary tale of abstraction gone wrong.
48  */
49 
50 static void sync_for_device(struct msm_gem_object *msm_obj)
51 {
52 	struct device *dev = msm_obj->base.dev->dev;
53 
54 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
55 }
56 
57 static void sync_for_cpu(struct msm_gem_object *msm_obj)
58 {
59 	struct device *dev = msm_obj->base.dev->dev;
60 
61 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
62 }
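
/*
 * Illustrative sketch (not driver code) of the dma-mapping pairing the two
 * helpers above rely on.  Note that dma_map_sgtable() can fail;
 * sync_for_device() ignores the return value because the mapping is only
 * used here for its cache-maintenance side effect.  The error handling
 * below is hypothetical:
 *
 *	int ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	...device uses the buffer...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */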
63 
64 static void update_lru(struct drm_gem_object *obj)
65 {
66 	struct msm_drm_private *priv = obj->dev->dev_private;
67 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
68 
69 	msm_gem_assert_locked(&msm_obj->base);
70 
71 	if (!msm_obj->pages) {
72 		GEM_WARN_ON(msm_obj->pin_count);
73 
74 		drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
75 	} else if (msm_obj->pin_count) {
76 		drm_gem_lru_move_tail(&priv->lru.pinned, obj);
77 	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
78 		drm_gem_lru_move_tail(&priv->lru.willneed, obj);
79 	} else {
80 		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
81 
82 		drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
83 	}
84 }
85 
86 /* allocate pages from VRAM carveout, used when no IOMMU: */
87 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
88 {
89 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
90 	struct msm_drm_private *priv = obj->dev->dev_private;
91 	dma_addr_t paddr;
92 	struct page **p;
93 	int ret, i;
94 
95 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
96 	if (!p)
97 		return ERR_PTR(-ENOMEM);
98 
99 	spin_lock(&priv->vram.lock);
100 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
101 	spin_unlock(&priv->vram.lock);
102 	if (ret) {
103 		kvfree(p);
104 		return ERR_PTR(ret);
105 	}
106 
107 	paddr = physaddr(obj);
108 	for (i = 0; i < npages; i++) {
109 		p[i] = pfn_to_page(__phys_to_pfn(paddr));
110 		paddr += PAGE_SIZE;
111 	}
112 
113 	return p;
114 }
115 
116 static struct page **get_pages(struct drm_gem_object *obj)
117 {
118 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
119 
120 	msm_gem_assert_locked(obj);
121 
122 	if (!msm_obj->pages) {
123 		struct drm_device *dev = obj->dev;
124 		struct page **p;
125 		int npages = obj->size >> PAGE_SHIFT;
126 
127 		if (use_pages(obj))
128 			p = drm_gem_get_pages(obj);
129 		else
130 			p = get_pages_vram(obj, npages);
131 
132 		if (IS_ERR(p)) {
133 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
134 					PTR_ERR(p));
135 			return p;
136 		}
137 
138 		msm_obj->pages = p;
139 
140 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
141 		if (IS_ERR(msm_obj->sgt)) {
142 			void *ptr = ERR_CAST(msm_obj->sgt);
143 
144 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
145 			msm_obj->sgt = NULL;
146 			return ptr;
147 		}
148 
149 		/* For non-cached buffers, ensure the new pages are clean
150 		 * because display controller, GPU, etc. are not coherent:
151 		 */
152 		if (msm_obj->flags & MSM_BO_WC)
153 			sync_for_device(msm_obj);
154 
155 		update_lru(obj);
156 	}
157 
158 	return msm_obj->pages;
159 }
160 
161 static void put_pages_vram(struct drm_gem_object *obj)
162 {
163 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
164 	struct msm_drm_private *priv = obj->dev->dev_private;
165 
166 	spin_lock(&priv->vram.lock);
167 	drm_mm_remove_node(msm_obj->vram_node);
168 	spin_unlock(&priv->vram.lock);
169 
170 	kvfree(msm_obj->pages);
171 }
172 
173 static void put_pages(struct drm_gem_object *obj)
174 {
175 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
176 
177 	if (msm_obj->pages) {
178 		if (msm_obj->sgt) {
179 			/* For non-cached buffers, sync the pages back
180 			 * to the CPU before freeing, because display
181 			 * controller, GPU, etc. are not coherent:
182 			 */
183 			if (msm_obj->flags & MSM_BO_WC)
184 				sync_for_cpu(msm_obj);
185 
186 			sg_free_table(msm_obj->sgt);
187 			kfree(msm_obj->sgt);
188 			msm_obj->sgt = NULL;
189 		}
190 
191 		if (use_pages(obj))
192 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
193 		else
194 			put_pages_vram(obj);
195 
196 		msm_obj->pages = NULL;
197 		update_lru(obj);
198 	}
199 }
200 
201 static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
202 {
203 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
204 	struct page **p;
205 
206 	msm_gem_assert_locked(obj);
207 
208 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
209 		return ERR_PTR(-EBUSY);
210 	}
211 
212 	p = get_pages(obj);
213 	if (!IS_ERR(p)) {
214 		to_msm_bo(obj)->pin_count++;
215 		update_lru(obj);
216 	}
217 
218 	return p;
219 }
220 
221 struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
222 {
223 	struct page **p;
224 
225 	msm_gem_lock(obj);
226 	p = msm_gem_pin_pages_locked(obj);
227 	msm_gem_unlock(obj);
228 
229 	return p;
230 }
231 
232 void msm_gem_unpin_pages(struct drm_gem_object *obj)
233 {
234 	msm_gem_lock(obj);
235 	msm_gem_unpin_locked(obj);
236 	msm_gem_unlock(obj);
237 }
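
/*
 * Illustrative usage sketch (hypothetical caller; a valid, unlocked GEM
 * object is assumed) of the exported pin/unpin pair, e.g. as used when
 * exporting a buffer to another device:
 *
 *	struct page **p = msm_gem_pin_pages(obj);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	...pages stay resident until the matching unpin...
 *	msm_gem_unpin_pages(obj);
 */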
238 
239 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
240 {
241 	if (msm_obj->flags & MSM_BO_WC)
242 		return pgprot_writecombine(prot);
243 	return prot;
244 }
245 
246 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
247 {
248 	struct vm_area_struct *vma = vmf->vma;
249 	struct drm_gem_object *obj = vma->vm_private_data;
250 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
251 	struct page **pages;
252 	unsigned long pfn;
253 	pgoff_t pgoff;
254 	int err;
255 	vm_fault_t ret;
256 
257 	/*
258 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
259 	 * a reference on obj. So, we don't need to hold one here.
260 	 */
261 	err = msm_gem_lock_interruptible(obj);
262 	if (err) {
263 		ret = VM_FAULT_NOPAGE;
264 		goto out;
265 	}
266 
267 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
268 		msm_gem_unlock(obj);
269 		return VM_FAULT_SIGBUS;
270 	}
271 
272 	/* make sure we have pages attached now */
273 	pages = get_pages(obj);
274 	if (IS_ERR(pages)) {
275 		ret = vmf_error(PTR_ERR(pages));
276 		goto out_unlock;
277 	}
278 
279 	/* We don't use vmf->pgoff since that has the fake offset: */
280 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
281 
282 	pfn = page_to_pfn(pages[pgoff]);
283 
284 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
285 			pfn, pfn << PAGE_SHIFT);
286 
287 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
288 
289 out_unlock:
290 	msm_gem_unlock(obj);
291 out:
292 	return ret;
293 }
294 
295 /* get mmap offset */
296 static uint64_t mmap_offset(struct drm_gem_object *obj)
297 {
298 	struct drm_device *dev = obj->dev;
299 	int ret;
300 
301 	msm_gem_assert_locked(obj);
302 
303 	/* Make it mmapable */
304 	ret = drm_gem_create_mmap_offset(obj);
305 
306 	if (ret) {
307 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
308 		return 0;
309 	}
310 
311 	return drm_vma_node_offset_addr(&obj->vma_node);
312 }
313 
314 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
315 {
316 	uint64_t offset;
317 
318 	msm_gem_lock(obj);
319 	offset = mmap_offset(obj);
320 	msm_gem_unlock(obj);
321 	return offset;
322 }
323 
324 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
325 		struct msm_gem_address_space *aspace)
326 {
327 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
328 	struct msm_gem_vma *vma;
329 
330 	msm_gem_assert_locked(obj);
331 
332 	vma = msm_gem_vma_new(aspace);
333 	if (!vma)
334 		return ERR_PTR(-ENOMEM);
335 
336 	list_add_tail(&vma->list, &msm_obj->vmas);
337 
338 	return vma;
339 }
340 
341 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
342 		struct msm_gem_address_space *aspace)
343 {
344 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
345 	struct msm_gem_vma *vma;
346 
347 	msm_gem_assert_locked(obj);
348 
349 	list_for_each_entry(vma, &msm_obj->vmas, list) {
350 		if (vma->aspace == aspace)
351 			return vma;
352 	}
353 
354 	return NULL;
355 }
356 
357 static void del_vma(struct msm_gem_vma *vma)
358 {
359 	if (!vma)
360 		return;
361 
362 	list_del(&vma->list);
363 	kfree(vma);
364 }
365 
366 /*
367  * If close is true, this also closes the VMA (releasing the allocated
368  * iova range) in addition to removing the iommu mapping.  In the eviction
369  * case (!close), we keep the iova allocated, but only remove the iommu
370  * mapping.
371  */
372 static void
373 put_iova_spaces(struct drm_gem_object *obj, bool close)
374 {
375 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
376 	struct msm_gem_vma *vma;
377 
378 	msm_gem_assert_locked(obj);
379 
380 	list_for_each_entry(vma, &msm_obj->vmas, list) {
381 		if (vma->aspace) {
382 			msm_gem_vma_purge(vma);
383 			if (close)
384 				msm_gem_vma_close(vma);
385 		}
386 	}
387 }
388 
389 /* Called with msm_obj locked */
390 static void
391 put_iova_vmas(struct drm_gem_object *obj)
392 {
393 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
394 	struct msm_gem_vma *vma, *tmp;
395 
396 	msm_gem_assert_locked(obj);
397 
398 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
399 		del_vma(vma);
400 	}
401 }
402 
403 static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
404 		struct msm_gem_address_space *aspace,
405 		u64 range_start, u64 range_end)
406 {
407 	struct msm_gem_vma *vma;
408 
409 	msm_gem_assert_locked(obj);
410 
411 	vma = lookup_vma(obj, aspace);
412 
413 	if (!vma) {
414 		int ret;
415 
416 		vma = add_vma(obj, aspace);
417 		if (IS_ERR(vma))
418 			return vma;
419 
420 		ret = msm_gem_vma_init(vma, obj->size,
421 			range_start, range_end);
422 		if (ret) {
423 			del_vma(vma);
424 			return ERR_PTR(ret);
425 		}
426 	} else {
427 		GEM_WARN_ON(vma->iova < range_start);
428 		GEM_WARN_ON((vma->iova + obj->size) > range_end);
429 	}
430 
431 	return vma;
432 }
433 
434 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
435 {
436 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
437 	struct page **pages;
438 	int ret, prot = IOMMU_READ;
439 
440 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
441 		prot |= IOMMU_WRITE;
442 
443 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
444 		prot |= IOMMU_PRIV;
445 
446 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
447 		prot |= IOMMU_CACHE;
448 
449 	msm_gem_assert_locked(obj);
450 
451 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
452 		return -EBUSY;
453 
454 	pages = msm_gem_pin_pages_locked(obj);
455 	if (IS_ERR(pages))
456 		return PTR_ERR(pages);
457 
458 	ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
459 	if (ret)
460 		msm_gem_unpin_locked(obj);
461 
462 	return ret;
463 }
464 
465 void msm_gem_unpin_locked(struct drm_gem_object *obj)
466 {
467 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
468 
469 	msm_gem_assert_locked(obj);
470 
471 	msm_obj->pin_count--;
472 	GEM_WARN_ON(msm_obj->pin_count < 0);
473 
474 	update_lru(obj);
475 }
476 
477 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
478 					   struct msm_gem_address_space *aspace)
479 {
480 	return get_vma_locked(obj, aspace, 0, U64_MAX);
481 }
482 
483 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
484 		struct msm_gem_address_space *aspace, uint64_t *iova,
485 		u64 range_start, u64 range_end)
486 {
487 	struct msm_gem_vma *vma;
488 	int ret;
489 
490 	msm_gem_assert_locked(obj);
491 
492 	vma = get_vma_locked(obj, aspace, range_start, range_end);
493 	if (IS_ERR(vma))
494 		return PTR_ERR(vma);
495 
496 	ret = msm_gem_pin_vma_locked(obj, vma);
497 	if (!ret)
498 		*iova = vma->iova;
499 
500 	return ret;
501 }
502 
503 /*
504  * Get the iova and pin it.  Should have a matching put.
505  * Limits the iova to the specified range (byte addresses).
506  */
507 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
508 		struct msm_gem_address_space *aspace, uint64_t *iova,
509 		u64 range_start, u64 range_end)
510 {
511 	int ret;
512 
513 	msm_gem_lock(obj);
514 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
515 	msm_gem_unlock(obj);
516 
517 	return ret;
518 }
519 
520 /* get iova and pin it. Should have a matching put */
521 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
522 		struct msm_gem_address_space *aspace, uint64_t *iova)
523 {
524 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
525 }
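
/*
 * Illustrative usage sketch (hypothetical caller; obj and aspace assumed
 * valid): pin an iova for device access, with the matching unpin once the
 * device no longer needs the buffer:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	...program the GPU/display with iova...
 *	msm_gem_unpin_iova(obj, aspace);
 */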
526 
527 /*
528  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
529  * valid for the life of the object
530  */
531 int msm_gem_get_iova(struct drm_gem_object *obj,
532 		struct msm_gem_address_space *aspace, uint64_t *iova)
533 {
534 	struct msm_gem_vma *vma;
535 	int ret = 0;
536 
537 	msm_gem_lock(obj);
538 	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
539 	if (IS_ERR(vma)) {
540 		ret = PTR_ERR(vma);
541 	} else {
542 		*iova = vma->iova;
543 	}
544 	msm_gem_unlock(obj);
545 
546 	return ret;
547 }
548 
549 static int clear_iova(struct drm_gem_object *obj,
550 		      struct msm_gem_address_space *aspace)
551 {
552 	struct msm_gem_vma *vma = lookup_vma(obj, aspace);
553 
554 	if (!vma)
555 		return 0;
556 
557 	if (msm_gem_vma_inuse(vma))
558 		return -EBUSY;
559 
560 	msm_gem_vma_purge(vma);
561 	msm_gem_vma_close(vma);
562 	del_vma(vma);
563 
564 	return 0;
565 }
566 
567 /*
568  * Get the requested iova but don't pin it.  Fails if the requested iova is
569  * not available.  Doesn't need a put because iovas are currently valid for
570  * the life of the object.
571  *
572  * Setting an iova of zero will clear the vma.
573  */
574 int msm_gem_set_iova(struct drm_gem_object *obj,
575 		     struct msm_gem_address_space *aspace, uint64_t iova)
576 {
577 	int ret = 0;
578 
579 	msm_gem_lock(obj);
580 	if (!iova) {
581 		ret = clear_iova(obj, aspace);
582 	} else {
583 		struct msm_gem_vma *vma;
584 		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
585 		if (IS_ERR(vma)) {
586 			ret = PTR_ERR(vma);
587 		} else if (GEM_WARN_ON(vma->iova != iova)) {
588 			clear_iova(obj, aspace);
589 			ret = -EBUSY;
590 		}
591 	}
592 	msm_gem_unlock(obj);
593 
594 	return ret;
595 }
596 
597 /*
598  * Unpin an iova by updating the reference counts. The memory isn't actually
599  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
600  * to get rid of it
601  */
602 void msm_gem_unpin_iova(struct drm_gem_object *obj,
603 		struct msm_gem_address_space *aspace)
604 {
605 	struct msm_gem_vma *vma;
606 
607 	msm_gem_lock(obj);
608 	vma = lookup_vma(obj, aspace);
609 	if (!GEM_WARN_ON(!vma)) {
610 		msm_gem_vma_unpin(vma);
611 		msm_gem_unpin_locked(obj);
612 	}
613 	msm_gem_unlock(obj);
614 }
615 
616 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
617 		struct drm_mode_create_dumb *args)
618 {
619 	args->pitch = align_pitch(args->width, args->bpp);
620 	args->size  = PAGE_ALIGN(args->pitch * args->height);
621 	return msm_gem_new_handle(dev, file, args->size,
622 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
623 }
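
/*
 * Worked example (assuming align_pitch() leaves an already-aligned row
 * untouched): a 1920x1080, 32 bpp dumb buffer gets pitch = 1920 * 4 =
 * 7680 bytes and size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes,
 * i.e. 2025 pages.
 */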
624 
625 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
626 		uint32_t handle, uint64_t *offset)
627 {
628 	struct drm_gem_object *obj;
629 	int ret = 0;
630 
631 	/* GEM does all our handle to object mapping */
632 	obj = drm_gem_object_lookup(file, handle);
633 	if (obj == NULL) {
634 		ret = -ENOENT;
635 		goto fail;
636 	}
637 
638 	*offset = msm_gem_mmap_offset(obj);
639 
640 	drm_gem_object_put(obj);
641 
642 fail:
643 	return ret;
644 }
645 
646 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
647 {
648 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
649 	struct page **pages;
650 	int ret = 0;
651 
652 	msm_gem_assert_locked(obj);
653 
654 	if (obj->import_attach)
655 		return ERR_PTR(-ENODEV);
656 
657 	if (GEM_WARN_ON(msm_obj->madv > madv)) {
658 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
659 			msm_obj->madv, madv);
660 		return ERR_PTR(-EBUSY);
661 	}
662 
663 	pages = msm_gem_pin_pages_locked(obj);
664 	if (IS_ERR(pages))
665 		return ERR_CAST(pages);
666 
667 	/* increment vmap_count *before* vmap() call, so shrinker can
668 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
669 	 * This guarantees that we won't try to msm_gem_vunmap() this
670 	 * same object from within the vmap() call (while we already
671 	 * hold msm_obj lock)
672 	 */
673 	msm_obj->vmap_count++;
674 
675 	if (!msm_obj->vaddr) {
676 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
677 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
678 		if (msm_obj->vaddr == NULL) {
679 			ret = -ENOMEM;
680 			goto fail;
681 		}
682 	}
683 
684 	return msm_obj->vaddr;
685 
686 fail:
687 	msm_obj->vmap_count--;
688 	msm_gem_unpin_locked(obj);
689 	return ERR_PTR(ret);
690 }
691 
692 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
693 {
694 	return get_vaddr(obj, MSM_MADV_WILLNEED);
695 }
696 
697 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
698 {
699 	void *ret;
700 
701 	msm_gem_lock(obj);
702 	ret = msm_gem_get_vaddr_locked(obj);
703 	msm_gem_unlock(obj);
704 
705 	return ret;
706 }
707 
708 /*
709  * Don't use this!  It is for the very special case of dumping
710  * submits from GPU hangs or faults, where the bo may already
711  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
712  * active list.
713  */
714 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
715 {
716 	return get_vaddr(obj, __MSM_MADV_PURGED);
717 }
718 
719 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
720 {
721 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
722 
723 	msm_gem_assert_locked(obj);
724 	GEM_WARN_ON(msm_obj->vmap_count < 1);
725 
726 	msm_obj->vmap_count--;
727 	msm_gem_unpin_locked(obj);
728 }
729 
730 void msm_gem_put_vaddr(struct drm_gem_object *obj)
731 {
732 	msm_gem_lock(obj);
733 	msm_gem_put_vaddr_locked(obj);
734 	msm_gem_unlock(obj);
735 }
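
/*
 * Illustrative usage sketch (hypothetical caller; data and len are
 * placeholders): CPU access through the kernel vmap, bracketed by the
 * get/put pair above:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */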
736 
737 /* Update madvise status; returns true if not purged, else
738  * false or -errno.
739  */
740 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
741 {
742 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
743 
744 	msm_gem_lock(obj);
745 
746 	if (msm_obj->madv != __MSM_MADV_PURGED)
747 		msm_obj->madv = madv;
748 
749 	madv = msm_obj->madv;
750 
751 	/* If the obj is inactive, we might need to move it
752 	 * between inactive lists
753 	 */
754 	update_lru(obj);
755 
756 	msm_gem_unlock(obj);
757 
758 	return (madv != __MSM_MADV_PURGED);
759 }
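
/*
 * Illustrative sketch of the caller contract (hypothetical ioctl-style
 * caller; 'args->retained' is a stand-in for the caller's own bookkeeping):
 * a zero return tells the caller the backing store was already purged.
 *
 *	int retained = msm_gem_madvise(obj, MSM_MADV_WILLNEED);
 *	if (retained < 0)
 *		return retained;
 *	args->retained = retained;
 */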
760 
761 void msm_gem_purge(struct drm_gem_object *obj)
762 {
763 	struct drm_device *dev = obj->dev;
764 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
765 
766 	msm_gem_assert_locked(obj);
767 	GEM_WARN_ON(!is_purgeable(msm_obj));
768 
769 	/* Get rid of any iommu mapping(s): */
770 	put_iova_spaces(obj, true);
771 
772 	msm_gem_vunmap(obj);
773 
774 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
775 
776 	put_pages(obj);
777 
778 	put_iova_vmas(obj);
779 
780 	msm_obj->madv = __MSM_MADV_PURGED;
781 
782 	drm_gem_free_mmap_offset(obj);
783 
784 	/* Our goal here is to return as much of the memory as
785 	 * possible back to the system, as we are called from OOM.
786 	 * To do this we must instruct the shmfs to drop all of its
787 	 * backing pages, *now*.
788 	 */
789 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
790 
791 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
792 			0, (loff_t)-1);
793 }
794 
795 /*
796  * Unpin the backing pages and make them available to be swapped out.
797  */
798 void msm_gem_evict(struct drm_gem_object *obj)
799 {
800 	struct drm_device *dev = obj->dev;
801 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
802 
803 	msm_gem_assert_locked(obj);
804 	GEM_WARN_ON(is_unevictable(msm_obj));
805 
806 	/* Get rid of any iommu mapping(s): */
807 	put_iova_spaces(obj, false);
808 
809 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
810 
811 	put_pages(obj);
812 }
813 
814 void msm_gem_vunmap(struct drm_gem_object *obj)
815 {
816 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
817 
818 	msm_gem_assert_locked(obj);
819 
820 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
821 		return;
822 
823 	vunmap(msm_obj->vaddr);
824 	msm_obj->vaddr = NULL;
825 }
826 
827 bool msm_gem_active(struct drm_gem_object *obj)
828 {
829 	msm_gem_assert_locked(obj);
830 
831 	if (to_msm_bo(obj)->pin_count)
832 		return true;
833 
834 	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
835 }
836 
837 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
838 {
839 	bool write = !!(op & MSM_PREP_WRITE);
840 	unsigned long remain =
841 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
842 	long ret;
843 
844 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
845 				    true,  remain);
846 	if (ret == 0)
847 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
848 	else if (ret < 0)
849 		return ret;
850 
851 	/* TODO cache maintenance */
852 
853 	return 0;
854 }
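
/*
 * Illustrative usage sketch (hypothetical caller): wait for pending GPU
 * access before touching the buffer with the CPU, then signal completion.
 * The timeout is an absolute ktime, here arbitrarily one second out:
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 1000);
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	...CPU reads/writes the buffer...
 *	msm_gem_cpu_fini(obj);
 */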
855 
856 int msm_gem_cpu_fini(struct drm_gem_object *obj)
857 {
858 	/* TODO cache maintenance */
859 	return 0;
860 }
861 
862 #ifdef CONFIG_DEBUG_FS
863 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
864 		struct msm_gem_stats *stats)
865 {
866 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
867 	struct dma_resv *robj = obj->resv;
868 	struct msm_gem_vma *vma;
869 	uint64_t off = drm_vma_node_start(&obj->vma_node);
870 	const char *madv;
871 
872 	msm_gem_lock(obj);
873 
874 	stats->all.count++;
875 	stats->all.size += obj->size;
876 
877 	if (msm_gem_active(obj)) {
878 		stats->active.count++;
879 		stats->active.size += obj->size;
880 	}
881 
882 	if (msm_obj->pages) {
883 		stats->resident.count++;
884 		stats->resident.size += obj->size;
885 	}
886 
887 	switch (msm_obj->madv) {
888 	case __MSM_MADV_PURGED:
889 		stats->purged.count++;
890 		stats->purged.size += obj->size;
891 		madv = " purged";
892 		break;
893 	case MSM_MADV_DONTNEED:
894 		stats->purgeable.count++;
895 		stats->purgeable.size += obj->size;
896 		madv = " purgeable";
897 		break;
898 	case MSM_MADV_WILLNEED:
899 	default:
900 		madv = "";
901 		break;
902 	}
903 
904 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
905 			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
906 			obj->name, kref_read(&obj->refcount),
907 			off, msm_obj->vaddr);
908 
909 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
910 
911 	if (!list_empty(&msm_obj->vmas)) {
912 
913 		seq_puts(m, "      vmas:");
914 
915 		list_for_each_entry(vma, &msm_obj->vmas, list) {
916 			const char *name, *comm;
917 			if (vma->aspace) {
918 				struct msm_gem_address_space *aspace = vma->aspace;
919 				struct task_struct *task =
920 					get_pid_task(aspace->pid, PIDTYPE_PID);
921 				if (task) {
922 					comm = kstrdup(task->comm, GFP_KERNEL);
923 					put_task_struct(task);
924 				} else {
925 					comm = NULL;
926 				}
927 				name = aspace->name;
928 			} else {
929 				name = comm = NULL;
930 			}
931 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
932 				name, comm ? ":" : "", comm ? comm : "",
933 				vma->aspace, vma->iova,
934 				vma->mapped ? "mapped" : "unmapped",
935 				msm_gem_vma_inuse(vma));
936 			kfree(comm);
937 		}
938 
939 		seq_puts(m, "\n");
940 	}
941 
942 	dma_resv_describe(robj, m);
943 	msm_gem_unlock(obj);
944 }
945 
946 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
947 {
948 	struct msm_gem_stats stats = {};
949 	struct msm_gem_object *msm_obj;
950 
951 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
952 	list_for_each_entry(msm_obj, list, node) {
953 		struct drm_gem_object *obj = &msm_obj->base;
954 		seq_puts(m, "   ");
955 		msm_gem_describe(obj, m, &stats);
956 	}
957 
958 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
959 			stats.all.count, stats.all.size);
960 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
961 			stats.active.count, stats.active.size);
962 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
963 			stats.resident.count, stats.resident.size);
964 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
965 			stats.purgeable.count, stats.purgeable.size);
966 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
967 			stats.purged.count, stats.purged.size);
968 }
969 #endif
970 
971 /* don't call directly!  Use drm_gem_object_put() */
972 static void msm_gem_free_object(struct drm_gem_object *obj)
973 {
974 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
975 	struct drm_device *dev = obj->dev;
976 	struct msm_drm_private *priv = dev->dev_private;
977 
978 	mutex_lock(&priv->obj_lock);
979 	list_del(&msm_obj->node);
980 	mutex_unlock(&priv->obj_lock);
981 
982 	put_iova_spaces(obj, true);
983 
984 	if (obj->import_attach) {
985 		GEM_WARN_ON(msm_obj->vaddr);
986 
987 		/* Don't drop the pages for imported dmabuf, as they are not
988 		 * ours; just free the array we allocated:
989 		 */
990 		kvfree(msm_obj->pages);
991 
992 		put_iova_vmas(obj);
993 
994 		drm_prime_gem_destroy(obj, msm_obj->sgt);
995 	} else {
996 		msm_gem_vunmap(obj);
997 		put_pages(obj);
998 		put_iova_vmas(obj);
999 	}
1000 
1001 	drm_gem_object_release(obj);
1002 
1003 	kfree(msm_obj);
1004 }
1005 
1006 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1007 {
1008 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1009 
1010 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1011 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1012 
1013 	return 0;
1014 }
1015 
1016 /* convenience method to construct a GEM buffer object, and userspace handle */
1017 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1018 		uint32_t size, uint32_t flags, uint32_t *handle,
1019 		char *name)
1020 {
1021 	struct drm_gem_object *obj;
1022 	int ret;
1023 
1024 	obj = msm_gem_new(dev, size, flags);
1025 
1026 	if (IS_ERR(obj))
1027 		return PTR_ERR(obj);
1028 
1029 	if (name)
1030 		msm_gem_object_set_name(obj, "%s", name);
1031 
1032 	ret = drm_gem_handle_create(file, obj, handle);
1033 
1034 	/* drop reference from allocate - handle holds it now */
1035 	drm_gem_object_put(obj);
1036 
1037 	return ret;
1038 }
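
/*
 * Illustrative sketch (hypothetical caller, e.g. an ioctl handler; 'args'
 * is a placeholder for the caller's request struct):
 *
 *	uint32_t handle;
 *	int ret = msm_gem_new_handle(dev, file, args->size, args->flags,
 *				     &handle, NULL);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */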
1039 
1040 static const struct vm_operations_struct vm_ops = {
1041 	.fault = msm_gem_fault,
1042 	.open = drm_gem_vm_open,
1043 	.close = drm_gem_vm_close,
1044 };
1045 
1046 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1047 	.free = msm_gem_free_object,
1048 	.pin = msm_gem_prime_pin,
1049 	.unpin = msm_gem_prime_unpin,
1050 	.get_sg_table = msm_gem_prime_get_sg_table,
1051 	.vmap = msm_gem_prime_vmap,
1052 	.vunmap = msm_gem_prime_vunmap,
1053 	.mmap = msm_gem_object_mmap,
1054 	.vm_ops = &vm_ops,
1055 };
1056 
1057 static int msm_gem_new_impl(struct drm_device *dev,
1058 		uint32_t size, uint32_t flags,
1059 		struct drm_gem_object **obj)
1060 {
1061 	struct msm_drm_private *priv = dev->dev_private;
1062 	struct msm_gem_object *msm_obj;
1063 
1064 	switch (flags & MSM_BO_CACHE_MASK) {
1065 	case MSM_BO_CACHED:
1066 	case MSM_BO_WC:
1067 		break;
1068 	case MSM_BO_CACHED_COHERENT:
1069 		if (priv->has_cached_coherent)
1070 			break;
1071 		fallthrough;
1072 	default:
1073 		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1074 				(flags & MSM_BO_CACHE_MASK));
1075 		return -EINVAL;
1076 	}
1077 
1078 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1079 	if (!msm_obj)
1080 		return -ENOMEM;
1081 
1082 	msm_obj->flags = flags;
1083 	msm_obj->madv = MSM_MADV_WILLNEED;
1084 
1085 	INIT_LIST_HEAD(&msm_obj->node);
1086 	INIT_LIST_HEAD(&msm_obj->vmas);
1087 
1088 	*obj = &msm_obj->base;
1089 	(*obj)->funcs = &msm_gem_object_funcs;
1090 
1091 	return 0;
1092 }
1093 
1094 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1095 {
1096 	struct msm_drm_private *priv = dev->dev_private;
1097 	struct msm_gem_object *msm_obj;
1098 	struct drm_gem_object *obj = NULL;
1099 	bool use_vram = false;
1100 	int ret;
1101 
1102 	size = PAGE_ALIGN(size);
1103 
1104 	if (!msm_use_mmu(dev))
1105 		use_vram = true;
1106 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1107 		use_vram = true;
1108 
1109 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1110 		return ERR_PTR(-EINVAL);
1111 
1112 	/* Disallow zero-sized objects as they make the underlying
1113 	 * infrastructure grumpy
1114 	 */
1115 	if (size == 0)
1116 		return ERR_PTR(-EINVAL);
1117 
1118 	ret = msm_gem_new_impl(dev, size, flags, &obj);
1119 	if (ret)
1120 		return ERR_PTR(ret);
1121 
1122 	msm_obj = to_msm_bo(obj);
1123 
1124 	if (use_vram) {
1125 		struct msm_gem_vma *vma;
1126 		struct page **pages;
1127 
1128 		drm_gem_private_object_init(dev, obj, size);
1129 
1130 		msm_gem_lock(obj);
1131 
1132 		vma = add_vma(obj, NULL);
1133 		msm_gem_unlock(obj);
1134 		if (IS_ERR(vma)) {
1135 			ret = PTR_ERR(vma);
1136 			goto fail;
1137 		}
1138 
1139 		to_msm_bo(obj)->vram_node = &vma->node;
1140 
1141 		msm_gem_lock(obj);
1142 		pages = get_pages(obj);
1143 		msm_gem_unlock(obj);
1144 		if (IS_ERR(pages)) {
1145 			ret = PTR_ERR(pages);
1146 			goto fail;
1147 		}
1148 
1149 		vma->iova = physaddr(obj);
1150 	} else {
1151 		ret = drm_gem_object_init(dev, obj, size);
1152 		if (ret)
1153 			goto fail;
1154 		/*
1155 		 * Our buffers are kept pinned, so allocating them from the
1156 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1157 		 * See comments above new_inode() for why this is required _and_
1158 		 * expected if you're going to pin these pages.
1159 		 */
1160 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1161 	}
1162 
1163 	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1164 
1165 	mutex_lock(&priv->obj_lock);
1166 	list_add_tail(&msm_obj->node, &priv->objects);
1167 	mutex_unlock(&priv->obj_lock);
1168 
1169 	return obj;
1170 
1171 fail:
1172 	drm_gem_object_put(obj);
1173 	return ERR_PTR(ret);
1174 }
1175 
1176 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1177 		struct dma_buf *dmabuf, struct sg_table *sgt)
1178 {
1179 	struct msm_drm_private *priv = dev->dev_private;
1180 	struct msm_gem_object *msm_obj;
1181 	struct drm_gem_object *obj;
1182 	uint32_t size;
1183 	int ret, npages;
1184 
1185 	/* if we don't have IOMMU, don't bother pretending we can import: */
1186 	if (!msm_use_mmu(dev)) {
1187 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1188 		return ERR_PTR(-EINVAL);
1189 	}
1190 
1191 	size = PAGE_ALIGN(dmabuf->size);
1192 
1193 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1194 	if (ret)
1195 		return ERR_PTR(ret);
1196 
1197 	drm_gem_private_object_init(dev, obj, size);
1198 
1199 	npages = size / PAGE_SIZE;
1200 
1201 	msm_obj = to_msm_bo(obj);
1202 	msm_gem_lock(obj);
1203 	msm_obj->sgt = sgt;
1204 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1205 	if (!msm_obj->pages) {
1206 		msm_gem_unlock(obj);
1207 		ret = -ENOMEM;
1208 		goto fail;
1209 	}
1210 
1211 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1212 	if (ret) {
1213 		msm_gem_unlock(obj);
1214 		goto fail;
1215 	}
1216 
1217 	msm_gem_unlock(obj);
1218 
1219 	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1220 
1221 	mutex_lock(&priv->obj_lock);
1222 	list_add_tail(&msm_obj->node, &priv->objects);
1223 	mutex_unlock(&priv->obj_lock);
1224 
1225 	return obj;
1226 
1227 fail:
1228 	drm_gem_object_put(obj);
1229 	return ERR_PTR(ret);
1230 }
1231 
1232 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1233 		uint32_t flags, struct msm_gem_address_space *aspace,
1234 		struct drm_gem_object **bo, uint64_t *iova)
1235 {
1236 	void *vaddr;
1237 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1238 	int ret;
1239 
1240 	if (IS_ERR(obj))
1241 		return ERR_CAST(obj);
1242 
1243 	if (iova) {
1244 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1245 		if (ret)
1246 			goto err;
1247 	}
1248 
1249 	vaddr = msm_gem_get_vaddr(obj);
1250 	if (IS_ERR(vaddr)) {
1251 		msm_gem_unpin_iova(obj, aspace);
1252 		ret = PTR_ERR(vaddr);
1253 		goto err;
1254 	}
1255 
1256 	if (bo)
1257 		*bo = obj;
1258 
1259 	return vaddr;
1260 err:
1261 	drm_gem_object_put(obj);
1262 
1263 	return ERR_PTR(ret);
1264 
1265 }
1266 
1267 void msm_gem_kernel_put(struct drm_gem_object *bo,
1268 		struct msm_gem_address_space *aspace)
1269 {
1270 	if (IS_ERR_OR_NULL(bo))
1271 		return;
1272 
1273 	msm_gem_put_vaddr(bo);
1274 	msm_gem_unpin_iova(bo, aspace);
1275 	drm_gem_object_put(bo);
1276 }
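
/*
 * Illustrative usage sketch (hypothetical caller; 'size' and 'aspace' come
 * from the caller): lifecycle of a kernel-internal buffer, e.g. a
 * ringbuffer, using the convenience helpers above:
 *
 *	uint64_t iova;
 *	struct drm_gem_object *bo;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace,
 *				       &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...use ptr for CPU access and iova for device access...
 *	msm_gem_kernel_put(bo, aspace);
 */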
1277 
1278 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1279 {
1280 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1281 	va_list ap;
1282 
1283 	if (!fmt)
1284 		return;
1285 
1286 	va_start(ap, fmt);
1287 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1288 	va_end(ap);
1289 }
1290