xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision 2ee4b5d2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <linux/dma-map-ops.h>
8 #include <linux/vmalloc.h>
9 #include <linux/spinlock.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/dma-buf.h>
12 #include <linux/pfn_t.h>
13 
14 #include <drm/drm_prime.h>
15 
16 #include "msm_drv.h"
17 #include "msm_fence.h"
18 #include "msm_gem.h"
19 #include "msm_gpu.h"
20 #include "msm_mmu.h"
21 
22 static void update_inactive(struct msm_gem_object *msm_obj);
23 
24 static dma_addr_t physaddr(struct drm_gem_object *obj)
25 {
26 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
27 	struct msm_drm_private *priv = obj->dev->dev_private;
28 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
29 			priv->vram.paddr;
30 }
31 
32 static bool use_pages(struct drm_gem_object *obj)
33 {
34 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
35 	return !msm_obj->vram_node;
36 }
37 
38 /*
39  * Cache sync... this is a bit over-complicated in order to fit the
40  * dma-mapping API.  Really the GPU cache is out of scope here (it is
41  * handled on the cmdstream) and all we need to do is invalidate newly
42  * allocated pages before mapping them to the CPU as uncached/writecombine.
43  *
44  * On top of this, we have the added headache that, depending on
45  * display generation, the display's iommu may be wired up to either
46  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
47  * that here we either have dma-direct or iommu ops.
48  *
49  * Let this be a cautionary tale of abstraction gone wrong.
50  */
51 
52 static void sync_for_device(struct msm_gem_object *msm_obj)
53 {
54 	struct device *dev = msm_obj->base.dev->dev;
55 
56 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
57 }
58 
59 static void sync_for_cpu(struct msm_gem_object *msm_obj)
60 {
61 	struct device *dev = msm_obj->base.dev->dev;
62 
63 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
64 }
65 
66 /* allocate pages from VRAM carveout, used when no IOMMU: */
67 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
68 {
69 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
70 	struct msm_drm_private *priv = obj->dev->dev_private;
71 	dma_addr_t paddr;
72 	struct page **p;
73 	int ret, i;
74 
75 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
76 	if (!p)
77 		return ERR_PTR(-ENOMEM);
78 
79 	spin_lock(&priv->vram.lock);
80 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
81 	spin_unlock(&priv->vram.lock);
82 	if (ret) {
83 		kvfree(p);
84 		return ERR_PTR(ret);
85 	}
86 
87 	paddr = physaddr(obj);
88 	for (i = 0; i < npages; i++) {
89 		p[i] = pfn_to_page(__phys_to_pfn(paddr));
90 		paddr += PAGE_SIZE;
91 	}
92 
93 	return p;
94 }
95 
96 static struct page **get_pages(struct drm_gem_object *obj)
97 {
98 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
99 
100 	GEM_WARN_ON(!msm_gem_is_locked(obj));
101 
102 	if (!msm_obj->pages) {
103 		struct drm_device *dev = obj->dev;
104 		struct page **p;
105 		int npages = obj->size >> PAGE_SHIFT;
106 
107 		if (use_pages(obj))
108 			p = drm_gem_get_pages(obj);
109 		else
110 			p = get_pages_vram(obj, npages);
111 
112 		if (IS_ERR(p)) {
113 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
114 					PTR_ERR(p));
115 			return p;
116 		}
117 
118 		msm_obj->pages = p;
119 
120 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
121 		if (IS_ERR(msm_obj->sgt)) {
122 			void *ptr = ERR_CAST(msm_obj->sgt);
123 
124 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
125 			msm_obj->sgt = NULL;
126 			return ptr;
127 		}
128 
129 		/* For non-cached buffers, ensure the new pages are clean
130 		 * because the display controller, GPU, etc. are not coherent:
131 		 */
132 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
133 			sync_for_device(msm_obj);
134 
135 		update_inactive(msm_obj);
136 	}
137 
138 	return msm_obj->pages;
139 }
140 
141 static void put_pages_vram(struct drm_gem_object *obj)
142 {
143 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
144 	struct msm_drm_private *priv = obj->dev->dev_private;
145 
146 	spin_lock(&priv->vram.lock);
147 	drm_mm_remove_node(msm_obj->vram_node);
148 	spin_unlock(&priv->vram.lock);
149 
150 	kvfree(msm_obj->pages);
151 }
152 
153 static void put_pages(struct drm_gem_object *obj)
154 {
155 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
156 
157 	if (msm_obj->pages) {
158 		if (msm_obj->sgt) {
159 			/* For non-cached buffers, undo the dma mapping
160 			 * set up in get_pages(), since the display
161 			 * controller, GPU, etc. are not coherent:
162 			 */
163 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
164 				sync_for_cpu(msm_obj);
165 
166 			sg_free_table(msm_obj->sgt);
167 			kfree(msm_obj->sgt);
168 			msm_obj->sgt = NULL;
169 		}
170 
171 		if (use_pages(obj))
172 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
173 		else
174 			put_pages_vram(obj);
175 
176 		msm_obj->pages = NULL;
177 	}
178 }
179 
180 struct page **msm_gem_get_pages(struct drm_gem_object *obj)
181 {
182 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
183 	struct page **p;
184 
185 	msm_gem_lock(obj);
186 
187 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
188 		msm_gem_unlock(obj);
189 		return ERR_PTR(-EBUSY);
190 	}
191 
192 	p = get_pages(obj);
193 
194 	if (!IS_ERR(p)) {
195 		msm_obj->pin_count++;
196 		update_inactive(msm_obj);
197 	}
198 
199 	msm_gem_unlock(obj);
200 	return p;
201 }
202 
203 void msm_gem_put_pages(struct drm_gem_object *obj)
204 {
205 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
206 
207 	msm_gem_lock(obj);
208 	msm_obj->pin_count--;
209 	GEM_WARN_ON(msm_obj->pin_count < 0);
210 	update_inactive(msm_obj);
211 	msm_gem_unlock(obj);
212 }
213 
214 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
215 {
216 	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
217 		return pgprot_writecombine(prot);
218 	return prot;
219 }
220 
221 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
222 {
223 	struct vm_area_struct *vma = vmf->vma;
224 	struct drm_gem_object *obj = vma->vm_private_data;
225 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
226 	struct page **pages;
227 	unsigned long pfn;
228 	pgoff_t pgoff;
229 	int err;
230 	vm_fault_t ret;
231 
232 	/*
233 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
234 	 * a reference on obj. So, we don't need to hold one here.
235 	 */
236 	err = msm_gem_lock_interruptible(obj);
237 	if (err) {
238 		ret = VM_FAULT_NOPAGE;
239 		goto out;
240 	}
241 
242 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
243 		msm_gem_unlock(obj);
244 		return VM_FAULT_SIGBUS;
245 	}
246 
247 	/* make sure we have pages attached now */
248 	pages = get_pages(obj);
249 	if (IS_ERR(pages)) {
250 		ret = vmf_error(PTR_ERR(pages));
251 		goto out_unlock;
252 	}
253 
254 	/* We don't use vmf->pgoff since that has the fake offset: */
255 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
256 
257 	pfn = page_to_pfn(pages[pgoff]);
258 
259 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
260 			pfn, pfn << PAGE_SHIFT);
261 
262 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
263 out_unlock:
264 	msm_gem_unlock(obj);
265 out:
266 	return ret;
267 }
268 
269 /** get mmap offset */
270 static uint64_t mmap_offset(struct drm_gem_object *obj)
271 {
272 	struct drm_device *dev = obj->dev;
273 	int ret;
274 
275 	GEM_WARN_ON(!msm_gem_is_locked(obj));
276 
277 	/* Make it mmapable */
278 	ret = drm_gem_create_mmap_offset(obj);
279 
280 	if (ret) {
281 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
282 		return 0;
283 	}
284 
285 	return drm_vma_node_offset_addr(&obj->vma_node);
286 }
287 
288 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
289 {
290 	uint64_t offset;
291 
292 	msm_gem_lock(obj);
293 	offset = mmap_offset(obj);
294 	msm_gem_unlock(obj);
295 	return offset;
296 }
297 
298 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
299 		struct msm_gem_address_space *aspace)
300 {
301 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
302 	struct msm_gem_vma *vma;
303 
304 	GEM_WARN_ON(!msm_gem_is_locked(obj));
305 
306 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
307 	if (!vma)
308 		return ERR_PTR(-ENOMEM);
309 
310 	vma->aspace = aspace;
311 
312 	list_add_tail(&vma->list, &msm_obj->vmas);
313 
314 	return vma;
315 }
316 
317 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
318 		struct msm_gem_address_space *aspace)
319 {
320 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
321 	struct msm_gem_vma *vma;
322 
323 	GEM_WARN_ON(!msm_gem_is_locked(obj));
324 
325 	list_for_each_entry(vma, &msm_obj->vmas, list) {
326 		if (vma->aspace == aspace)
327 			return vma;
328 	}
329 
330 	return NULL;
331 }
332 
333 static void del_vma(struct msm_gem_vma *vma)
334 {
335 	if (!vma)
336 		return;
337 
338 	list_del(&vma->list);
339 	kfree(vma);
340 }
341 
342 /*
343  * If close is true, this also closes the VMA (releasing the allocated
344  * iova range) in addition to removing the iommu mapping.  In the eviction
345  * case (!close), we keep the iova allocated, but only remove the iommu
346  * mapping.
347  */
348 static void
349 put_iova_spaces(struct drm_gem_object *obj, bool close)
350 {
351 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
352 	struct msm_gem_vma *vma;
353 
354 	GEM_WARN_ON(!msm_gem_is_locked(obj));
355 
356 	list_for_each_entry(vma, &msm_obj->vmas, list) {
357 		if (vma->aspace) {
358 			msm_gem_purge_vma(vma->aspace, vma);
359 			if (close)
360 				msm_gem_close_vma(vma->aspace, vma);
361 		}
362 	}
363 }
364 
365 /* Called with msm_obj locked */
366 static void
367 put_iova_vmas(struct drm_gem_object *obj)
368 {
369 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
370 	struct msm_gem_vma *vma, *tmp;
371 
372 	GEM_WARN_ON(!msm_gem_is_locked(obj));
373 
374 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
375 		del_vma(vma);
376 	}
377 }
378 
379 static int get_iova_locked(struct drm_gem_object *obj,
380 		struct msm_gem_address_space *aspace, uint64_t *iova,
381 		u64 range_start, u64 range_end)
382 {
383 	struct msm_gem_vma *vma;
384 	int ret = 0;
385 
386 	GEM_WARN_ON(!msm_gem_is_locked(obj));
387 
388 	vma = lookup_vma(obj, aspace);
389 
390 	if (!vma) {
391 		vma = add_vma(obj, aspace);
392 		if (IS_ERR(vma))
393 			return PTR_ERR(vma);
394 
395 		ret = msm_gem_init_vma(aspace, vma, obj->size,
396 			range_start, range_end);
397 		if (ret) {
398 			del_vma(vma);
399 			return ret;
400 		}
401 	}
402 
403 	*iova = vma->iova;
404 	return 0;
405 }
406 
407 static int msm_gem_pin_iova(struct drm_gem_object *obj,
408 		struct msm_gem_address_space *aspace)
409 {
410 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
411 	struct msm_gem_vma *vma;
412 	struct page **pages;
413 	int ret, prot = IOMMU_READ;
414 
415 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
416 		prot |= IOMMU_WRITE;
417 
418 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
419 		prot |= IOMMU_PRIV;
420 
421 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
422 		prot |= IOMMU_CACHE;
423 
424 	GEM_WARN_ON(!msm_gem_is_locked(obj));
425 
426 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
427 		return -EBUSY;
428 
429 	vma = lookup_vma(obj, aspace);
430 	if (GEM_WARN_ON(!vma))
431 		return -EINVAL;
432 
433 	pages = get_pages(obj);
434 	if (IS_ERR(pages))
435 		return PTR_ERR(pages);
436 
437 	ret = msm_gem_map_vma(aspace, vma, prot, msm_obj->sgt, obj->size);
438 
439 	if (!ret)
440 		msm_obj->pin_count++;
441 
442 	return ret;
443 }
444 
445 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
446 		struct msm_gem_address_space *aspace, uint64_t *iova,
447 		u64 range_start, u64 range_end)
448 {
449 	u64 local;
450 	int ret;
451 
452 	GEM_WARN_ON(!msm_gem_is_locked(obj));
453 
454 	ret = get_iova_locked(obj, aspace, &local,
455 		range_start, range_end);
456 
457 	if (!ret)
458 		ret = msm_gem_pin_iova(obj, aspace);
459 
460 	if (!ret)
461 		*iova = local;
462 
463 	return ret;
464 }
465 
466 /*
467  * get iova and pin it. Should have a matching put
468  * limits iova to specified range (in pages)
469  */
470 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
471 		struct msm_gem_address_space *aspace, uint64_t *iova,
472 		u64 range_start, u64 range_end)
473 {
474 	int ret;
475 
476 	msm_gem_lock(obj);
477 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
478 	msm_gem_unlock(obj);
479 
480 	return ret;
481 }
482 
483 int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
484 		struct msm_gem_address_space *aspace, uint64_t *iova)
485 {
486 	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
487 }
488 
489 /* get iova and pin it. Should have a matching put */
490 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
491 		struct msm_gem_address_space *aspace, uint64_t *iova)
492 {
493 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
494 }
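
/*
 * Illustrative calling sketch (not lifted from an actual caller): pin a
 * buffer into a GPU address space for as long as the hardware may access
 * it, and drop the pin with a matching unpin when done:
 *
 *	uint64_t iova;
 *	int ret;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... emit 'iova' into the cmdstream / program it into hardware ...
 *	msm_gem_unpin_iova(obj, aspace);
 */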
495 
496 /*
497  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
498  * valid for the life of the object
499  */
500 int msm_gem_get_iova(struct drm_gem_object *obj,
501 		struct msm_gem_address_space *aspace, uint64_t *iova)
502 {
503 	int ret;
504 
505 	msm_gem_lock(obj);
506 	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
507 	msm_gem_unlock(obj);
508 
509 	return ret;
510 }
511 
512 /* get iova without taking a reference, used in places where you have
513  * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
514  */
515 uint64_t msm_gem_iova(struct drm_gem_object *obj,
516 		struct msm_gem_address_space *aspace)
517 {
518 	struct msm_gem_vma *vma;
519 
520 	msm_gem_lock(obj);
521 	vma = lookup_vma(obj, aspace);
522 	msm_gem_unlock(obj);
523 	GEM_WARN_ON(!vma);
524 
525 	return vma ? vma->iova : 0;
526 }
527 
528 /*
529  * Locked variant of msm_gem_unpin_iova()
530  */
531 void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
532 		struct msm_gem_address_space *aspace)
533 {
534 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
535 	struct msm_gem_vma *vma;
536 
537 	GEM_WARN_ON(!msm_gem_is_locked(obj));
538 
539 	vma = lookup_vma(obj, aspace);
540 
541 	if (!GEM_WARN_ON(!vma)) {
542 		msm_gem_unmap_vma(aspace, vma);
543 
544 		msm_obj->pin_count--;
545 		GEM_WARN_ON(msm_obj->pin_count < 0);
546 
547 		update_inactive(msm_obj);
548 	}
549 }
550 
551 /*
552  * Unpin an iova by updating the reference counts. The memory isn't actually
553  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
554  * to get rid of it
555  */
556 void msm_gem_unpin_iova(struct drm_gem_object *obj,
557 		struct msm_gem_address_space *aspace)
558 {
559 	msm_gem_lock(obj);
560 	msm_gem_unpin_iova_locked(obj, aspace);
561 	msm_gem_unlock(obj);
562 }
563 
564 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
565 		struct drm_mode_create_dumb *args)
566 {
567 	args->pitch = align_pitch(args->width, args->bpp);
568 	args->size  = PAGE_ALIGN(args->pitch * args->height);
569 	return msm_gem_new_handle(dev, file, args->size,
570 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
571 }
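
/*
 * Worked example with illustrative numbers: a 1920x1080 dumb buffer at
 * 32 bpp gets a pitch of at least 1920 * 4 = 7680 bytes (align_pitch()
 * may round up further), so args->size becomes PAGE_ALIGN(7680 * 1080)
 * = 8294400 bytes, i.e. 2025 pages with 4K pages.
 */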
572 
573 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
574 		uint32_t handle, uint64_t *offset)
575 {
576 	struct drm_gem_object *obj;
577 	int ret = 0;
578 
579 	/* GEM does all our handle to object mapping */
580 	obj = drm_gem_object_lookup(file, handle);
581 	if (obj == NULL) {
582 		ret = -ENOENT;
583 		goto fail;
584 	}
585 
586 	*offset = msm_gem_mmap_offset(obj);
587 
588 	drm_gem_object_put(obj);
589 
590 fail:
591 	return ret;
592 }
593 
594 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
595 {
596 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
597 	int ret = 0;
598 
599 	GEM_WARN_ON(!msm_gem_is_locked(obj));
600 
601 	if (obj->import_attach)
602 		return ERR_PTR(-ENODEV);
603 
604 	if (GEM_WARN_ON(msm_obj->madv > madv)) {
605 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
606 			msm_obj->madv, madv);
607 		return ERR_PTR(-EBUSY);
608 	}
609 
610 	/* Increment vmap_count *before* the vmap() call, so the shrinker can
611 	 * check vmap_count (is_vunmapable()) outside of the msm_obj lock.
612 	 * This guarantees that we won't try to msm_gem_vunmap() this
613 	 * same object from within the vmap() call (while we already
614 	 * hold the msm_obj lock).
615 	 */
616 	msm_obj->vmap_count++;
617 
618 	if (!msm_obj->vaddr) {
619 		struct page **pages = get_pages(obj);
620 		if (IS_ERR(pages)) {
621 			ret = PTR_ERR(pages);
622 			goto fail;
623 		}
624 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
625 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
626 		if (msm_obj->vaddr == NULL) {
627 			ret = -ENOMEM;
628 			goto fail;
629 		}
630 
631 		update_inactive(msm_obj);
632 	}
633 
634 	return msm_obj->vaddr;
635 
636 fail:
637 	msm_obj->vmap_count--;
638 	return ERR_PTR(ret);
639 }
640 
641 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
642 {
643 	return get_vaddr(obj, MSM_MADV_WILLNEED);
644 }
645 
646 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
647 {
648 	void *ret;
649 
650 	msm_gem_lock(obj);
651 	ret = msm_gem_get_vaddr_locked(obj);
652 	msm_gem_unlock(obj);
653 
654 	return ret;
655 }
656 
657 /*
658  * Don't use this!  It is for the very special case of dumping
659  * submits from GPU hangs or faults, where the bo may already
660  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
661  * active list.
662  */
663 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
664 {
665 	return get_vaddr(obj, __MSM_MADV_PURGED);
666 }
667 
668 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
669 {
670 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
671 
672 	GEM_WARN_ON(!msm_gem_is_locked(obj));
673 	GEM_WARN_ON(msm_obj->vmap_count < 1);
674 
675 	msm_obj->vmap_count--;
676 }
677 
678 void msm_gem_put_vaddr(struct drm_gem_object *obj)
679 {
680 	msm_gem_lock(obj);
681 	msm_gem_put_vaddr_locked(obj);
682 	msm_gem_unlock(obj);
683 }
684 
685 /* Update the madvise status; returns true if the backing pages have
686  * not been purged, else false.
687  */
688 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
689 {
690 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
691 
692 	msm_gem_lock(obj);
693 
694 	if (msm_obj->madv != __MSM_MADV_PURGED)
695 		msm_obj->madv = madv;
696 
697 	madv = msm_obj->madv;
698 
699 	/* If the obj is inactive, we might need to move it
700 	 * between inactive lists
701 	 */
702 	if (msm_obj->active_count == 0)
703 		update_inactive(msm_obj);
704 
705 	msm_gem_unlock(obj);
706 
707 	return (madv != __MSM_MADV_PURGED);
708 }
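
/*
 * Calling sketch (illustrative only; in the driver this is typically
 * reached from the GEM madvise ioctl): mark a buffer purgeable and
 * check whether its backing store was still resident at that point:
 *
 *	retained = msm_gem_madvise(obj, MSM_MADV_DONTNEED);
 *	if (retained)
 *		... pages are still there; the shrinker may purge them
 *		    later under memory pressure ...
 */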
709 
710 void msm_gem_purge(struct drm_gem_object *obj)
711 {
712 	struct drm_device *dev = obj->dev;
713 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
714 
715 	GEM_WARN_ON(!msm_gem_is_locked(obj));
716 	GEM_WARN_ON(!is_purgeable(msm_obj));
717 
718 	/* Get rid of any iommu mapping(s): */
719 	put_iova_spaces(obj, true);
720 
721 	msm_gem_vunmap(obj);
722 
723 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
724 
725 	put_pages(obj);
726 
727 	put_iova_vmas(obj);
728 
729 	msm_obj->madv = __MSM_MADV_PURGED;
730 	update_inactive(msm_obj);
731 
732 	drm_gem_free_mmap_offset(obj);
733 
734 	/* Our goal here is to return as much of the memory as
735 	 * possible back to the system, as we are called from OOM.
736 	 * To do this we must instruct the shmfs to drop all of its
737 	 * backing pages, *now*.
738 	 */
739 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
740 
741 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
742 			0, (loff_t)-1);
743 }
744 
745 /*
746  * Unpin the backing pages and make them available to be swapped out.
747  */
748 void msm_gem_evict(struct drm_gem_object *obj)
749 {
750 	struct drm_device *dev = obj->dev;
751 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
752 
753 	GEM_WARN_ON(!msm_gem_is_locked(obj));
754 	GEM_WARN_ON(is_unevictable(msm_obj));
755 	GEM_WARN_ON(!msm_obj->evictable);
756 	GEM_WARN_ON(msm_obj->active_count);
757 
758 	/* Get rid of any iommu mapping(s): */
759 	put_iova_spaces(obj, false);
760 
761 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
762 
763 	put_pages(obj);
764 
765 	update_inactive(msm_obj);
766 }
767 
768 void msm_gem_vunmap(struct drm_gem_object *obj)
769 {
770 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
771 
772 	GEM_WARN_ON(!msm_gem_is_locked(obj));
773 
774 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
775 		return;
776 
777 	vunmap(msm_obj->vaddr);
778 	msm_obj->vaddr = NULL;
779 }
780 
781 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
782 {
783 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
784 	struct msm_drm_private *priv = obj->dev->dev_private;
785 
786 	might_sleep();
787 	GEM_WARN_ON(!msm_gem_is_locked(obj));
788 	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
789 	GEM_WARN_ON(msm_obj->dontneed);
790 
791 	if (msm_obj->active_count++ == 0) {
792 		mutex_lock(&priv->mm_lock);
793 		if (msm_obj->evictable)
794 			mark_unevictable(msm_obj);
795 		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
796 		mutex_unlock(&priv->mm_lock);
797 	}
798 }
799 
800 void msm_gem_active_put(struct drm_gem_object *obj)
801 {
802 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
803 
804 	might_sleep();
805 	GEM_WARN_ON(!msm_gem_is_locked(obj));
806 
807 	if (--msm_obj->active_count == 0) {
808 		update_inactive(msm_obj);
809 	}
810 }
811 
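/*
 * Move an idle object onto the inactive list matching its madvise state:
 * willneed (evictable), dontneed (purgeable) or unpinned/purged.  Callers
 * hold the object lock; the list manipulation itself is serialized by
 * priv->mm_lock.
 */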
812 static void update_inactive(struct msm_gem_object *msm_obj)
813 {
814 	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
815 
816 	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
817 
818 	if (msm_obj->active_count != 0)
819 		return;
820 
821 	mutex_lock(&priv->mm_lock);
822 
823 	if (msm_obj->dontneed)
824 		mark_unpurgeable(msm_obj);
825 	if (msm_obj->evictable)
826 		mark_unevictable(msm_obj);
827 
828 	list_del(&msm_obj->mm_list);
829 	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
830 		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
831 		mark_evictable(msm_obj);
832 	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
833 		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
834 		mark_purgeable(msm_obj);
835 	} else {
836 		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
837 		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
838 	}
839 
840 	mutex_unlock(&priv->mm_lock);
841 }
842 
843 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
844 {
845 	bool write = !!(op & MSM_PREP_WRITE);
846 	unsigned long remain =
847 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
848 	long ret;
849 
850 	ret = dma_resv_wait_timeout(obj->resv, write, true,  remain);
851 	if (ret == 0)
852 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
853 	else if (ret < 0)
854 		return ret;
855 
856 	/* TODO cache maintenance */
857 
858 	return 0;
859 }
860 
861 int msm_gem_cpu_fini(struct drm_gem_object *obj)
862 {
863 	/* TODO cache maintenance */
864 	return 0;
865 }
866 
867 #ifdef CONFIG_DEBUG_FS
868 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
869 		struct msm_gem_stats *stats)
870 {
871 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
872 	struct dma_resv *robj = obj->resv;
873 	struct msm_gem_vma *vma;
874 	uint64_t off = drm_vma_node_start(&obj->vma_node);
875 	const char *madv;
876 
877 	msm_gem_lock(obj);
878 
879 	stats->all.count++;
880 	stats->all.size += obj->size;
881 
882 	if (is_active(msm_obj)) {
883 		stats->active.count++;
884 		stats->active.size += obj->size;
885 	}
886 
887 	if (msm_obj->pages) {
888 		stats->resident.count++;
889 		stats->resident.size += obj->size;
890 	}
891 
892 	switch (msm_obj->madv) {
893 	case __MSM_MADV_PURGED:
894 		stats->purged.count++;
895 		stats->purged.size += obj->size;
896 		madv = " purged";
897 		break;
898 	case MSM_MADV_DONTNEED:
899 		stats->purgeable.count++;
900 		stats->purgeable.size += obj->size;
901 		madv = " purgeable";
902 		break;
903 	case MSM_MADV_WILLNEED:
904 	default:
905 		madv = "";
906 		break;
907 	}
908 
909 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
910 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
911 			obj->name, kref_read(&obj->refcount),
912 			off, msm_obj->vaddr);
913 
914 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
915 
916 	if (!list_empty(&msm_obj->vmas)) {
917 
918 		seq_puts(m, "      vmas:");
919 
920 		list_for_each_entry(vma, &msm_obj->vmas, list) {
921 			const char *name, *comm;
922 			if (vma->aspace) {
923 				struct msm_gem_address_space *aspace = vma->aspace;
924 				struct task_struct *task =
925 					get_pid_task(aspace->pid, PIDTYPE_PID);
926 				if (task) {
927 					comm = kstrdup(task->comm, GFP_KERNEL);
928 				} else {
929 					comm = NULL;
930 				}
931 				name = aspace->name;
932 			} else {
933 				name = comm = NULL;
934 			}
935 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
936 				name, comm ? ":" : "", comm ? comm : "",
937 				vma->aspace, vma->iova,
938 				vma->mapped ? "mapped" : "unmapped",
939 				msm_gem_vma_inuse(vma));
940 			kfree(comm);
941 		}
942 
943 		seq_puts(m, "\n");
944 	}
945 
946 	dma_resv_describe(robj, m);
947 	msm_gem_unlock(obj);
948 }
949 
950 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
951 {
952 	struct msm_gem_stats stats = {};
953 	struct msm_gem_object *msm_obj;
954 
955 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
956 	list_for_each_entry(msm_obj, list, node) {
957 		struct drm_gem_object *obj = &msm_obj->base;
958 		seq_puts(m, "   ");
959 		msm_gem_describe(obj, m, &stats);
960 	}
961 
962 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
963 			stats.all.count, stats.all.size);
964 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
965 			stats.active.count, stats.active.size);
966 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
967 			stats.resident.count, stats.resident.size);
968 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
969 			stats.purgeable.count, stats.purgeable.size);
970 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
971 			stats.purged.count, stats.purged.size);
972 }
973 #endif
974 
975 /* don't call directly!  Use drm_gem_object_put() */
976 void msm_gem_free_object(struct drm_gem_object *obj)
977 {
978 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
979 	struct drm_device *dev = obj->dev;
980 	struct msm_drm_private *priv = dev->dev_private;
981 
982 	mutex_lock(&priv->obj_lock);
983 	list_del(&msm_obj->node);
984 	mutex_unlock(&priv->obj_lock);
985 
986 	mutex_lock(&priv->mm_lock);
987 	if (msm_obj->dontneed)
988 		mark_unpurgeable(msm_obj);
989 	list_del(&msm_obj->mm_list);
990 	mutex_unlock(&priv->mm_lock);
991 
992 	msm_gem_lock(obj);
993 
994 	/* object should not be on active list: */
995 	GEM_WARN_ON(is_active(msm_obj));
996 
997 	put_iova_spaces(obj, true);
998 
999 	if (obj->import_attach) {
1000 		GEM_WARN_ON(msm_obj->vaddr);
1001 
1002 		/* Don't drop the pages for an imported dmabuf, as they are
1003 		 * not ours; just free the array we allocated:
1004 		 */
1005 		kvfree(msm_obj->pages);
1006 
1007 		put_iova_vmas(obj);
1008 
1009 		/* dma_buf_detach() grabs the resv lock, so we need to unlock
1010 		 * prior to calling drm_prime_gem_destroy()
1011 		 */
1012 		msm_gem_unlock(obj);
1013 
1014 		drm_prime_gem_destroy(obj, msm_obj->sgt);
1015 	} else {
1016 		msm_gem_vunmap(obj);
1017 		put_pages(obj);
1018 		put_iova_vmas(obj);
1019 		msm_gem_unlock(obj);
1020 	}
1021 
1022 	drm_gem_object_release(obj);
1023 
1024 	kfree(msm_obj);
1025 }
1026 
1027 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1028 {
1029 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1030 
1031 	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
1032 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1033 
1034 	return 0;
1035 }
1036 
1037 /* convenience method to construct a GEM buffer object and a userspace handle */
1038 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1039 		uint32_t size, uint32_t flags, uint32_t *handle,
1040 		char *name)
1041 {
1042 	struct drm_gem_object *obj;
1043 	int ret;
1044 
1045 	obj = msm_gem_new(dev, size, flags);
1046 
1047 	if (IS_ERR(obj))
1048 		return PTR_ERR(obj);
1049 
1050 	if (name)
1051 		msm_gem_object_set_name(obj, "%s", name);
1052 
1053 	ret = drm_gem_handle_create(file, obj, handle);
1054 
1055 	/* drop reference from allocate - handle holds it now */
1056 	drm_gem_object_put(obj);
1057 
1058 	return ret;
1059 }
1060 
1061 static const struct vm_operations_struct vm_ops = {
1062 	.fault = msm_gem_fault,
1063 	.open = drm_gem_vm_open,
1064 	.close = drm_gem_vm_close,
1065 };
1066 
1067 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1068 	.free = msm_gem_free_object,
1069 	.pin = msm_gem_prime_pin,
1070 	.unpin = msm_gem_prime_unpin,
1071 	.get_sg_table = msm_gem_prime_get_sg_table,
1072 	.vmap = msm_gem_prime_vmap,
1073 	.vunmap = msm_gem_prime_vunmap,
1074 	.mmap = msm_gem_object_mmap,
1075 	.vm_ops = &vm_ops,
1076 };
1077 
1078 static int msm_gem_new_impl(struct drm_device *dev,
1079 		uint32_t size, uint32_t flags,
1080 		struct drm_gem_object **obj)
1081 {
1082 	struct msm_drm_private *priv = dev->dev_private;
1083 	struct msm_gem_object *msm_obj;
1084 
1085 	switch (flags & MSM_BO_CACHE_MASK) {
1086 	case MSM_BO_UNCACHED:
1087 	case MSM_BO_CACHED:
1088 	case MSM_BO_WC:
1089 		break;
1090 	case MSM_BO_CACHED_COHERENT:
1091 		if (priv->has_cached_coherent)
1092 			break;
1093 		fallthrough;
1094 	default:
1095 		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1096 				(flags & MSM_BO_CACHE_MASK));
1097 		return -EINVAL;
1098 	}
1099 
1100 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1101 	if (!msm_obj)
1102 		return -ENOMEM;
1103 
1104 	msm_obj->flags = flags;
1105 	msm_obj->madv = MSM_MADV_WILLNEED;
1106 
1107 	INIT_LIST_HEAD(&msm_obj->node);
1108 	INIT_LIST_HEAD(&msm_obj->vmas);
1109 
1110 	*obj = &msm_obj->base;
1111 	(*obj)->funcs = &msm_gem_object_funcs;
1112 
1113 	return 0;
1114 }
1115 
1116 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1117 {
1118 	struct msm_drm_private *priv = dev->dev_private;
1119 	struct msm_gem_object *msm_obj;
1120 	struct drm_gem_object *obj = NULL;
1121 	bool use_vram = false;
1122 	int ret;
1123 
1124 	size = PAGE_ALIGN(size);
1125 
1126 	if (!msm_use_mmu(dev))
1127 		use_vram = true;
1128 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1129 		use_vram = true;
1130 
1131 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1132 		return ERR_PTR(-EINVAL);
1133 
1134 	/* Disallow zero sized objects as they make the underlying
1135 	 * infrastructure grumpy
1136 	 */
1137 	if (size == 0)
1138 		return ERR_PTR(-EINVAL);
1139 
1140 	ret = msm_gem_new_impl(dev, size, flags, &obj);
1141 	if (ret)
1142 		return ERR_PTR(ret);
1143 
1144 	msm_obj = to_msm_bo(obj);
1145 
1146 	if (use_vram) {
1147 		struct msm_gem_vma *vma;
1148 		struct page **pages;
1149 
1150 		drm_gem_private_object_init(dev, obj, size);
1151 
1152 		msm_gem_lock(obj);
1153 
1154 		vma = add_vma(obj, NULL);
1155 		msm_gem_unlock(obj);
1156 		if (IS_ERR(vma)) {
1157 			ret = PTR_ERR(vma);
1158 			goto fail;
1159 		}
1160 
1161 		to_msm_bo(obj)->vram_node = &vma->node;
1162 
1163 		/* The call chain get_pages() -> update_inactive() tries to
1164 		 * access msm_obj->mm_list, but it is not initialized yet.
1165 		 * To avoid a NULL pointer dereference, initialize mm_list
1166 		 * to be empty.
1167 		 */
1168 		INIT_LIST_HEAD(&msm_obj->mm_list);
1169 
1170 		msm_gem_lock(obj);
1171 		pages = get_pages(obj);
1172 		msm_gem_unlock(obj);
1173 		if (IS_ERR(pages)) {
1174 			ret = PTR_ERR(pages);
1175 			goto fail;
1176 		}
1177 
1178 		vma->iova = physaddr(obj);
1179 	} else {
1180 		ret = drm_gem_object_init(dev, obj, size);
1181 		if (ret)
1182 			goto fail;
1183 		/*
1184 		 * Our buffers are kept pinned, so allocating them from the
1185 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1186 		 * See the comments above new_inode() for why this is required _and_
1187 		 * expected if you're going to pin these pages.
1188 		 */
1189 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1190 	}
1191 
1192 	mutex_lock(&priv->mm_lock);
1193 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1194 	mutex_unlock(&priv->mm_lock);
1195 
1196 	mutex_lock(&priv->obj_lock);
1197 	list_add_tail(&msm_obj->node, &priv->objects);
1198 	mutex_unlock(&priv->obj_lock);
1199 
1200 	return obj;
1201 
1202 fail:
1203 	drm_gem_object_put(obj);
1204 	return ERR_PTR(ret);
1205 }
1206 
1207 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1208 		struct dma_buf *dmabuf, struct sg_table *sgt)
1209 {
1210 	struct msm_drm_private *priv = dev->dev_private;
1211 	struct msm_gem_object *msm_obj;
1212 	struct drm_gem_object *obj;
1213 	uint32_t size;
1214 	int ret, npages;
1215 
1216 	/* if we don't have an IOMMU, don't bother pretending we can import: */
1217 	if (!msm_use_mmu(dev)) {
1218 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1219 		return ERR_PTR(-EINVAL);
1220 	}
1221 
1222 	size = PAGE_ALIGN(dmabuf->size);
1223 
1224 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1225 	if (ret)
1226 		return ERR_PTR(ret);
1227 
1228 	drm_gem_private_object_init(dev, obj, size);
1229 
1230 	npages = size / PAGE_SIZE;
1231 
1232 	msm_obj = to_msm_bo(obj);
1233 	msm_gem_lock(obj);
1234 	msm_obj->sgt = sgt;
1235 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1236 	if (!msm_obj->pages) {
1237 		msm_gem_unlock(obj);
1238 		ret = -ENOMEM;
1239 		goto fail;
1240 	}
1241 
1242 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1243 	if (ret) {
1244 		msm_gem_unlock(obj);
1245 		goto fail;
1246 	}
1247 
1248 	msm_gem_unlock(obj);
1249 
1250 	mutex_lock(&priv->mm_lock);
1251 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1252 	mutex_unlock(&priv->mm_lock);
1253 
1254 	mutex_lock(&priv->obj_lock);
1255 	list_add_tail(&msm_obj->node, &priv->objects);
1256 	mutex_unlock(&priv->obj_lock);
1257 
1258 	return obj;
1259 
1260 fail:
1261 	drm_gem_object_put(obj);
1262 	return ERR_PTR(ret);
1263 }
1264 
1265 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1266 		uint32_t flags, struct msm_gem_address_space *aspace,
1267 		struct drm_gem_object **bo, uint64_t *iova)
1268 {
1269 	void *vaddr;
1270 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1271 	int ret;
1272 
1273 	if (IS_ERR(obj))
1274 		return ERR_CAST(obj);
1275 
1276 	if (iova) {
1277 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1278 		if (ret)
1279 			goto err;
1280 	}
1281 
1282 	vaddr = msm_gem_get_vaddr(obj);
1283 	if (IS_ERR(vaddr)) {
1284 		msm_gem_unpin_iova(obj, aspace);
1285 		ret = PTR_ERR(vaddr);
1286 		goto err;
1287 	}
1288 
1289 	if (bo)
1290 		*bo = obj;
1291 
1292 	return vaddr;
1293 err:
1294 	drm_gem_object_put(obj);
1295 
1296 	return ERR_PTR(ret);
1297 
1298 }
1299 
1300 void msm_gem_kernel_put(struct drm_gem_object *bo,
1301 		struct msm_gem_address_space *aspace)
1302 {
1303 	if (IS_ERR_OR_NULL(bo))
1304 		return;
1305 
1306 	msm_gem_put_vaddr(bo);
1307 	msm_gem_unpin_iova(bo, aspace);
1308 	drm_gem_object_put(bo);
1309 }
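
/*
 * Usage sketch (hypothetical allocation, for illustration only): create a
 * kernel-owned BO that stays pinned and kernel-mapped for its lifetime,
 * then tear everything down with the matching put:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... ptr gives CPU access, iova is the GPU address ...
 *	msm_gem_kernel_put(bo, aspace);
 */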
1310 
1311 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1312 {
1313 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1314 	va_list ap;
1315 
1316 	if (!fmt)
1317 		return;
1318 
1319 	va_start(ap, fmt);
1320 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1321 	va_end(ap);
1322 }
1323