// SPDX-License-Identifier: GPL-2.0-only
/* drivers/gpu/drm/msm/msm_gem.c (xref revision caab277b) */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
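
/*
 * A worked example of the carveout math above (hypothetical numbers, not
 * from this driver): with the carveout based at priv->vram.paddr =
 * 0x80000000 and a drm_mm node allocated at page index 16, physaddr()
 * yields (16 << PAGE_SHIFT) + 0x80000000 = 0x80010000 for 4K pages, and
 * the loop above then fills p[] with the struct pages backing
 * 0x80010000, 0x80011000, and so on.
 */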

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}
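
/*
 * For context, the offset returned above is the "fake" offset that
 * userspace passes to mmap() on the drm fd.  A minimal userspace sketch
 * using the generic dumb-buffer ioctl (hypothetical fd/handle, error
 * handling elided; driver-specific ioctls that return
 * msm_gem_mmap_offset() work the same way):
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, req.offset);
 *
 * The resulting vma is then set up by msm_gem_mmap()/msm_gem_mmap_obj()
 * and populated one page at a time by msm_gem_fault().
 */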

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_purge_vma(vma->aspace, vma);
		msm_gem_close_vma(vma->aspace, vma);
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}
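
/*
 * A sketch of the expected pairing (hypothetical caller, error handling
 * trimmed): each successful get-and-pin is balanced by an unpin once the
 * GPU no longer needs the mapping:
 *
 *	uint64_t iova;
 *	int ret;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... emit iova into the command stream ...
 *	msm_gem_unpin_iova(obj, aspace);
 */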

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
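
/*
 * Typical usage of the vmap helpers, as a sketch (not a verbatim
 * caller): msm_gem_get_vaddr()/msm_gem_put_vaddr() bracket CPU access,
 * and the vmap_count taken above keeps the shrinker from vunmapping the
 * object in between:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */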

/* Update madvise status, returns true if the object's backing pages
 * have not been purged, else false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
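
/*
 * A sketch of the retain-check pattern built on the return value
 * (hypothetical caller, simplified): before reusing a buffer that was
 * marked purgeable, promote it back to WILLNEED and check whether the
 * backing pages survived:
 *
 *	// backing pages were purged by the shrinker; contents are lost
 *	if (!msm_gem_madvise(obj, MSM_MADV_WILLNEED))
 *		return -ENOMEM;
 */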

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
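
/*
 * A sketch of how a caller waits for the GPU before touching a buffer
 * with the CPU (hypothetical caller): the timeout is an absolute ktime,
 * here 100ms from now:
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 100);
 *	int ret;
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads the buffer ...
 *	msm_gem_cpu_fini(obj);
 */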

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}
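
/*
 * Note on the scheme above: llist_add() returns true only when it adds
 * the first node to an empty list, so the free work is queued once per
 * batch of frees rather than once per object; msm_gem_free_work() then
 * drains the whole batch under struct_mutex.
 */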

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv)
		msm_obj->base.resv = resv;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
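
/*
 * Typical pairing of the kernel-bo helpers, as a sketch along the lines
 * of the ringbuffer allocation (simplified, error handling trimmed):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	msm_gem_object_set_name(bo, "ring%d", id);
 *	...
 *	msm_gem_kernel_put(bo, aspace, false);
 */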

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
1198