// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}
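
/*
 * Worked example (illustrative numbers only): vram_node offsets are in
 * pages, since get_pages_vram() below sizes the drm_mm node in pages.
 * With PAGE_SHIFT == 12, priv->vram.paddr == 0x40000000 and
 * vram_node->start == 16, physaddr() returns:
 *
 *	(16 << 12) + 0x40000000 == 0x40010000
 */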

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
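
/*
 * Illustrative lifecycle sketch (not a verbatim call chain): for a
 * MSM_BO_WC object, get_pages() below calls sync_for_device() once the
 * pages are allocated, so stale cache lines are cleaned before the
 * pages are used uncached/writecombine; put_pages() later calls
 * sync_for_cpu() to hand the pages back before they are freed:
 *
 *	pages = get_pages(obj);    (allocate, then sync_for_device())
 *	...                        (device and WC CPU access)
 *	put_pages(obj);            (sync_for_cpu(), then free)
 */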

static void update_lru_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(&msm_obj->base);

	if (!msm_obj->pages) {
		GEM_WARN_ON(msm_obj->pin_count);

		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
	} else if (msm_obj->pin_count) {
		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);

		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
	}
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	mutex_lock(&priv->lru.lock);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}
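
/*
 * Summary of the bucket selection in update_lru_locked() above:
 *
 *	no backing pages             -> priv->lru.unbacked
 *	pages and pin_count > 0      -> priv->lru.pinned
 *	pages and MSM_MADV_WILLNEED  -> priv->lru.willneed
 *	pages and MSM_MADV_DONTNEED  -> priv->lru.dontneed
 */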

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_lru(obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, hand the pages back
			 * to the CPU domain before freeing, since the
			 * display controller, GPU, etc. are not
			 * coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
		update_lru(obj);
	}
}

static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return ERR_PTR(-EBUSY);

	p = get_pages(obj);
	if (IS_ERR(p))
		return p;

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count++;
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);

	return p;
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_lock(obj);
	p = msm_gem_pin_pages_locked(obj);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_unpin_locked(obj);
	msm_gem_unlock(obj);
}
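
/*
 * Illustrative pairing (hypothetical caller): pin the backing pages
 * around a CPU-side access so the shrinker cannot evict them, then
 * drop the pin again:
 *
 *	struct page **p = msm_gem_pin_pages(obj);
 *
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	... access p[0] .. p[(obj->size >> PAGE_SHIFT) - 1] ...
 *	msm_gem_unpin_pages(obj);
 */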

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	msm_gem_assert_locked(obj);

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}
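
/*
 * Hedged userspace sketch: the offset returned above is what userspace
 * passes to mmap().  With the msm UAPI it is typically retrieved via
 * DRM_IOCTL_MSM_GEM_INFO with MSM_INFO_GET_OFFSET (see
 * include/uapi/drm/msm_drm.h):
 *
 *	struct drm_msm_gem_info req = {
 *		.handle = handle,
 *		.info = MSM_INFO_GET_OFFSET,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
 *	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, req.value);
 */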

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = msm_gem_vma_new(aspace);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping.  In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_vma_purge(vma);
			if (close)
				msm_gem_vma_close(vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	msm_gem_assert_locked(obj);

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_vma_init(vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	pages = msm_gem_pin_pages_locked(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
	if (ret)
		msm_gem_unpin_locked(obj);

	return ret;
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	msm_gem_assert_locked(obj);

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * Get the iova and pin it.  Should have a matching put.  Limits the
 * iova to the specified address range.
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}
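
/*
 * Illustrative pairing (hypothetical caller): hold the pin only for as
 * long as the GPU address is needed:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *
 *	if (ret)
 *		return ret;
 *	... program the GPU with iova ...
 *	msm_gem_unpin_iova(obj, aspace);
 */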

/*
 * Get an iova but don't pin it.  Doesn't need a put because iovas are
 * currently valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		      struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_vma_purge(vma);
	msm_gem_vma_close(vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it.  Fails if the requested iova is
 * not available.  Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;

		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}
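
/*
 * Illustrative sketch (hypothetical address): map an object at a fixed
 * GPU address chosen by the caller, and clear the mapping again later
 * by passing an iova of zero:
 *
 *	ret = msm_gem_set_iova(obj, aspace, 0x100000000ULL);
 *	...
 *	msm_gem_set_iova(obj, aspace, 0);
 */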

/*
 * Unpin an iova by updating the reference counts.  The memory isn't
 * actually purged until something else (shrinker, mm_notifier,
 * destroy, etc) decides to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_vma_unpin(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
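
/*
 * Worked example (illustrative numbers; align_pitch() lives in
 * msm_drv.h, and is assumed here to pad the row to a 32-pixel
 * boundary): a 1024x768 dumb buffer at 32bpp gives
 *
 *	pitch = 1024 * 4 = 4096 bytes
 *	size  = PAGE_ALIGN(4096 * 768) = 3145728 bytes (768 pages)
 */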

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto out;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

out:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret = 0;

	msm_gem_assert_locked(obj);

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	pages = msm_gem_pin_pages_locked(obj);
	if (IS_ERR(pages))
		return ERR_CAST(pages);

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}
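
/*
 * Illustrative pairing (hypothetical caller): map the object into the
 * kernel, fill it, then drop the mapping reference:
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	msm_gem_put_vaddr(obj);
 */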

/* Update madvise status, returns true if the backing pages have not
 * been purged, else false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	mutex_lock(&priv->lru.lock);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	update_lru_locked(obj);

	mutex_unlock(&priv->lru.lock);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}
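
/*
 * Hedged userspace sketch: madvise is driven by
 * DRM_IOCTL_MSM_GEM_MADVISE (see include/uapi/drm/msm_drm.h).  A
 * cache-like consumer marks idle buffers DONTNEED, and checks
 * `retained` when marking them WILLNEED again:
 *
 *	struct drm_msm_gem_madvise req = {
 *		.handle = handle,
 *		.madv = MSM_MADV_WILLNEED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);
 *	if (!req.retained)
 *		... contents were purged, reinitialize the buffer ...
 */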

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	mutex_lock(&priv->lru.lock);
	/* A one-way transition: */
	msm_obj->madv = __MSM_MADV_PURGED;
	mutex_unlock(&priv->lru.lock);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(is_unevictable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
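
/*
 * Hedged userspace sketch of the prep/fini pairing, via
 * DRM_IOCTL_MSM_GEM_CPU_PREP and DRM_IOCTL_MSM_GEM_CPU_FINI (see
 * include/uapi/drm/msm_drm.h):
 *
 *	struct drm_msm_gem_cpu_prep prep = {
 *		.handle = handle,
 *		.op = MSM_PREP_READ | MSM_PREP_WRITE,
 *		.timeout = { .tv_sec = 1 },
 *	};
 *	struct drm_msm_gem_cpu_fini fini = { .handle = handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_CPU_PREP, &prep);
 *	... CPU access ...
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_CPU_FINI, &fini);
 */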

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (msm_gem_active(obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {
		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;

			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() for why this is required
		 * _and_ expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	drm_gem_lru_move_tail(&priv->lru.pinned, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}
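
/*
 * Illustrative pairing (hypothetical caller, e.g. allocating a small
 * GPU-visible scratch buffer):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC,
 *					 aspace, &bo, &iova);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... vaddr for CPU access, iova for the GPU ...
 *	msm_gem_kernel_put(bo, aspace);
 */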

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}