xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision 37c68900)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2c8afe684SRob Clark /*
3c8afe684SRob Clark  * Copyright (C) 2013 Red Hat
4c8afe684SRob Clark  * Author: Rob Clark <robdclark@gmail.com>
5c8afe684SRob Clark  */
6c8afe684SRob Clark 
70a0f0d8bSChristoph Hellwig #include <linux/dma-map-ops.h>
8c8afe684SRob Clark #include <linux/spinlock.h>
9c8afe684SRob Clark #include <linux/shmem_fs.h>
1005b84911SRob Clark #include <linux/dma-buf.h>
1101c8f1c4SDan Williams #include <linux/pfn_t.h>
12c8afe684SRob Clark 
13feea39a8SSam Ravnborg #include <drm/drm_prime.h>
14feea39a8SSam Ravnborg 
15c8afe684SRob Clark #include "msm_drv.h"
16fde5de6cSRob Clark #include "msm_fence.h"
17c8afe684SRob Clark #include "msm_gem.h"
187198e6b0SRob Clark #include "msm_gpu.h"
19871d812aSRob Clark #include "msm_mmu.h"
20c8afe684SRob Clark 
213edfa30fSRob Clark static void update_inactive(struct msm_gem_object *msm_obj);
220e08270aSSushmita Susheelendra 
23871d812aSRob Clark static dma_addr_t physaddr(struct drm_gem_object *obj)
24871d812aSRob Clark {
25871d812aSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
26871d812aSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
27871d812aSRob Clark 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
28871d812aSRob Clark 			priv->vram.paddr;
29871d812aSRob Clark }
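
/*
 * Illustrative example with hypothetical numbers: if the carveout sits at
 * priv->vram.paddr == 0x80000000 and the object's vram_node starts at
 * page 4, then (assuming 4K pages) physaddr() returns
 * (4 << 12) + 0x80000000 == 0x80004000.
 */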
30871d812aSRob Clark 
31072f1f91SRob Clark static bool use_pages(struct drm_gem_object *obj)
32072f1f91SRob Clark {
33072f1f91SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
34072f1f91SRob Clark 	return !msm_obj->vram_node;
35072f1f91SRob Clark }
36072f1f91SRob Clark 
373de433c5SRob Clark /*
383de433c5SRob Clark  * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
393de433c5SRob Clark  * API.  Really GPU cache is out of scope here (handled on cmdstream)
403de433c5SRob Clark  * and all we need to do is invalidate newly allocated pages before
413de433c5SRob Clark  * mapping to CPU as uncached/writecombine.
423de433c5SRob Clark  *
433de433c5SRob Clark  * On top of this, we have the added headache that, depending on
443de433c5SRob Clark  * display generation, the display's iommu may be wired up to either
453de433c5SRob Clark  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
463de433c5SRob Clark  * that here we either have dma-direct or iommu ops.
473de433c5SRob Clark  *
483de433c5SRob Clark  * Let this be a cautionary tale of abstraction gone wrong.
493de433c5SRob Clark  */
503de433c5SRob Clark 
513de433c5SRob Clark static void sync_for_device(struct msm_gem_object *msm_obj)
523de433c5SRob Clark {
533de433c5SRob Clark 	struct device *dev = msm_obj->base.dev->dev;
543de433c5SRob Clark 
557690a33fSMarek Szyprowski 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
563de433c5SRob Clark }
573de433c5SRob Clark 
583de433c5SRob Clark static void sync_for_cpu(struct msm_gem_object *msm_obj)
593de433c5SRob Clark {
603de433c5SRob Clark 	struct device *dev = msm_obj->base.dev->dev;
613de433c5SRob Clark 
627690a33fSMarek Szyprowski 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
633de433c5SRob Clark }
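
/*
 * Illustrative sketch (not part of the driver) of how the two helpers
 * above pair up over a buffer's lifetime, using names from this file:
 * sync_for_device() runs once after allocation, before the pages are
 * mapped uncached/writecombine, and sync_for_cpu() runs once at teardown:
 *
 *	pages = get_pages(obj);     // allocates, then sync_for_device()
 *	// ... CPU mmap (WC) and/or GPU access ...
 *	put_pages(obj);             // sync_for_cpu(), then release pages
 */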
643de433c5SRob Clark 
65871d812aSRob Clark /* allocate pages from VRAM carveout, used when no IOMMU: */
660e08270aSSushmita Susheelendra static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
67871d812aSRob Clark {
68871d812aSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
69871d812aSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
70871d812aSRob Clark 	dma_addr_t paddr;
71871d812aSRob Clark 	struct page **p;
72871d812aSRob Clark 	int ret, i;
73871d812aSRob Clark 
742098105eSMichal Hocko 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
75871d812aSRob Clark 	if (!p)
76871d812aSRob Clark 		return ERR_PTR(-ENOMEM);
77871d812aSRob Clark 
780e08270aSSushmita Susheelendra 	spin_lock(&priv->vram.lock);
794e64e553SChris Wilson 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
800e08270aSSushmita Susheelendra 	spin_unlock(&priv->vram.lock);
81871d812aSRob Clark 	if (ret) {
822098105eSMichal Hocko 		kvfree(p);
83871d812aSRob Clark 		return ERR_PTR(ret);
84871d812aSRob Clark 	}
85871d812aSRob Clark 
86871d812aSRob Clark 	paddr = physaddr(obj);
87871d812aSRob Clark 	for (i = 0; i < npages; i++) {
88871d812aSRob Clark 		p[i] = phys_to_page(paddr);
89871d812aSRob Clark 		paddr += PAGE_SIZE;
90871d812aSRob Clark 	}
91871d812aSRob Clark 
92871d812aSRob Clark 	return p;
93871d812aSRob Clark }
94c8afe684SRob Clark 
95c8afe684SRob Clark static struct page **get_pages(struct drm_gem_object *obj)
96c8afe684SRob Clark {
97c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
98c8afe684SRob Clark 
9990643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
10007fcad0dSIskren Chernev 
101c8afe684SRob Clark 	if (!msm_obj->pages) {
102c8afe684SRob Clark 		struct drm_device *dev = obj->dev;
103871d812aSRob Clark 		struct page **p;
104c8afe684SRob Clark 		int npages = obj->size >> PAGE_SHIFT;
105c8afe684SRob Clark 
106072f1f91SRob Clark 		if (use_pages(obj))
1070cdbe8acSDavid Herrmann 			p = drm_gem_get_pages(obj);
108871d812aSRob Clark 		else
109871d812aSRob Clark 			p = get_pages_vram(obj, npages);
110871d812aSRob Clark 
111c8afe684SRob Clark 		if (IS_ERR(p)) {
1126a41da17SMamta Shukla 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
113c8afe684SRob Clark 					PTR_ERR(p));
114c8afe684SRob Clark 			return p;
115c8afe684SRob Clark 		}
116c8afe684SRob Clark 
11762e3a3e3SPrakash Kamliya 		msm_obj->pages = p;
11862e3a3e3SPrakash Kamliya 
119707d561fSGerd Hoffmann 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
1201f70e079SWei Yongjun 		if (IS_ERR(msm_obj->sgt)) {
12162e3a3e3SPrakash Kamliya 			void *ptr = ERR_CAST(msm_obj->sgt);
122c8afe684SRob Clark 
1236a41da17SMamta Shukla 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
12462e3a3e3SPrakash Kamliya 			msm_obj->sgt = NULL;
12562e3a3e3SPrakash Kamliya 			return ptr;
12662e3a3e3SPrakash Kamliya 		}
127c8afe684SRob Clark 
128c8afe684SRob Clark 		/* For non-cached buffers, ensure the new pages are clean
129c8afe684SRob Clark 		 * because the display controller, GPU, etc. are not coherent:
130c8afe684SRob Clark 		 */
131c8afe684SRob Clark 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
1323de433c5SRob Clark 			sync_for_device(msm_obj);
13364fcbde7SRob Clark 
13464fcbde7SRob Clark 		GEM_WARN_ON(msm_obj->active_count);
13564fcbde7SRob Clark 		update_inactive(msm_obj);
136c8afe684SRob Clark 	}
137c8afe684SRob Clark 
138c8afe684SRob Clark 	return msm_obj->pages;
139c8afe684SRob Clark }
140c8afe684SRob Clark 
1410e08270aSSushmita Susheelendra static void put_pages_vram(struct drm_gem_object *obj)
1420e08270aSSushmita Susheelendra {
1430e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1440e08270aSSushmita Susheelendra 	struct msm_drm_private *priv = obj->dev->dev_private;
1450e08270aSSushmita Susheelendra 
1460e08270aSSushmita Susheelendra 	spin_lock(&priv->vram.lock);
1470e08270aSSushmita Susheelendra 	drm_mm_remove_node(msm_obj->vram_node);
1480e08270aSSushmita Susheelendra 	spin_unlock(&priv->vram.lock);
1490e08270aSSushmita Susheelendra 
1500e08270aSSushmita Susheelendra 	kvfree(msm_obj->pages);
1510e08270aSSushmita Susheelendra }
1520e08270aSSushmita Susheelendra 
153c8afe684SRob Clark static void put_pages(struct drm_gem_object *obj)
154c8afe684SRob Clark {
155c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
156c8afe684SRob Clark 
157c8afe684SRob Clark 	if (msm_obj->pages) {
1583976626eSBen Hutchings 		if (msm_obj->sgt) {
1593976626eSBen Hutchings 			/* For non-cached buffers, sync the pages back
1603976626eSBen Hutchings 			 * to the CPU before release, because the display
1613976626eSBen Hutchings 			 * controller, GPU, etc. are not coherent:
162c8afe684SRob Clark 			 */
163c8afe684SRob Clark 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
1643de433c5SRob Clark 				sync_for_cpu(msm_obj);
16562e3a3e3SPrakash Kamliya 
166c8afe684SRob Clark 			sg_free_table(msm_obj->sgt);
167c8afe684SRob Clark 			kfree(msm_obj->sgt);
168b9a31d0dSRob Clark 			msm_obj->sgt = NULL;
1693976626eSBen Hutchings 		}
170c8afe684SRob Clark 
171072f1f91SRob Clark 		if (use_pages(obj))
172c8afe684SRob Clark 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
1730e08270aSSushmita Susheelendra 		else
1740e08270aSSushmita Susheelendra 			put_pages_vram(obj);
175871d812aSRob Clark 
176c8afe684SRob Clark 		msm_obj->pages = NULL;
177c8afe684SRob Clark 	}
178c8afe684SRob Clark }
179c8afe684SRob Clark 
18005b84911SRob Clark struct page **msm_gem_get_pages(struct drm_gem_object *obj)
18105b84911SRob Clark {
1820e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
18305b84911SRob Clark 	struct page **p;
1840e08270aSSushmita Susheelendra 
185a6ae74c9SRob Clark 	msm_gem_lock(obj);
1860e08270aSSushmita Susheelendra 
18790643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
188a6ae74c9SRob Clark 		msm_gem_unlock(obj);
1890e08270aSSushmita Susheelendra 		return ERR_PTR(-EBUSY);
1900e08270aSSushmita Susheelendra 	}
1910e08270aSSushmita Susheelendra 
19205b84911SRob Clark 	p = get_pages(obj);
19310f76165SRob Clark 
19410f76165SRob Clark 	if (!IS_ERR(p)) {
19510f76165SRob Clark 		msm_obj->pin_count++;
19610f76165SRob Clark 		update_inactive(msm_obj);
19710f76165SRob Clark 	}
19810f76165SRob Clark 
199a6ae74c9SRob Clark 	msm_gem_unlock(obj);
20005b84911SRob Clark 	return p;
20105b84911SRob Clark }
20205b84911SRob Clark 
20305b84911SRob Clark void msm_gem_put_pages(struct drm_gem_object *obj)
20405b84911SRob Clark {
20510f76165SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
20610f76165SRob Clark 
20710f76165SRob Clark 	msm_gem_lock(obj);
20810f76165SRob Clark 	msm_obj->pin_count--;
20910f76165SRob Clark 	GEM_WARN_ON(msm_obj->pin_count < 0);
21010f76165SRob Clark 	update_inactive(msm_obj);
21110f76165SRob Clark 	msm_gem_unlock(obj);
21205b84911SRob Clark }
21305b84911SRob Clark 
214af9b3547SJonathan Marek static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
215af9b3547SJonathan Marek {
2169ef36443SJonathan Marek 	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
217af9b3547SJonathan Marek 		return pgprot_writecombine(prot);
218af9b3547SJonathan Marek 	return prot;
219af9b3547SJonathan Marek }
220af9b3547SJonathan Marek 
221c8afe684SRob Clark int msm_gem_mmap_obj(struct drm_gem_object *obj,
222c8afe684SRob Clark 		struct vm_area_struct *vma)
223c8afe684SRob Clark {
224c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
225c8afe684SRob Clark 
226c8afe684SRob Clark 	vma->vm_flags &= ~VM_PFNMAP;
227c8afe684SRob Clark 	vma->vm_flags |= VM_MIXEDMAP;
228af9b3547SJonathan Marek 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
229c8afe684SRob Clark 
230c8afe684SRob Clark 	return 0;
231c8afe684SRob Clark }
232c8afe684SRob Clark 
233c8afe684SRob Clark int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
234c8afe684SRob Clark {
235c8afe684SRob Clark 	int ret;
236c8afe684SRob Clark 
237c8afe684SRob Clark 	ret = drm_gem_mmap(filp, vma);
238c8afe684SRob Clark 	if (ret) {
239c8afe684SRob Clark 		DBG("mmap failed: %d", ret);
240c8afe684SRob Clark 		return ret;
241c8afe684SRob Clark 	}
242c8afe684SRob Clark 
243c8afe684SRob Clark 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
244c8afe684SRob Clark }
245c8afe684SRob Clark 
2463c9edd9cSThomas Zimmermann static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
247c8afe684SRob Clark {
24811bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
249c8afe684SRob Clark 	struct drm_gem_object *obj = vma->vm_private_data;
2500e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
251c8afe684SRob Clark 	struct page **pages;
252c8afe684SRob Clark 	unsigned long pfn;
253c8afe684SRob Clark 	pgoff_t pgoff;
254a5f74ec7SSouptick Joarder 	int err;
255a5f74ec7SSouptick Joarder 	vm_fault_t ret;
256c8afe684SRob Clark 
2570e08270aSSushmita Susheelendra 	/*
2580e08270aSSushmita Susheelendra 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
2590e08270aSSushmita Susheelendra 	 * a reference on obj. So, we don't need to hold one here.
260d78d383aSRob Clark 	 */
261a6ae74c9SRob Clark 	err = msm_gem_lock_interruptible(obj);
262a5f74ec7SSouptick Joarder 	if (err) {
263a5f74ec7SSouptick Joarder 		ret = VM_FAULT_NOPAGE;
264c8afe684SRob Clark 		goto out;
265a5f74ec7SSouptick Joarder 	}
266c8afe684SRob Clark 
26790643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
268a6ae74c9SRob Clark 		msm_gem_unlock(obj);
2690e08270aSSushmita Susheelendra 		return VM_FAULT_SIGBUS;
2700e08270aSSushmita Susheelendra 	}
2710e08270aSSushmita Susheelendra 
272c8afe684SRob Clark 	/* make sure we have pages attached now */
273c8afe684SRob Clark 	pages = get_pages(obj);
274c8afe684SRob Clark 	if (IS_ERR(pages)) {
275a5f74ec7SSouptick Joarder 		ret = vmf_error(PTR_ERR(pages));
276c8afe684SRob Clark 		goto out_unlock;
277c8afe684SRob Clark 	}
278c8afe684SRob Clark 
279c8afe684SRob Clark 	/* We don't use vmf->pgoff since that has the fake offset: */
2801a29d85eSJan Kara 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
281c8afe684SRob Clark 
282871d812aSRob Clark 	pfn = page_to_pfn(pages[pgoff]);
283c8afe684SRob Clark 
2841a29d85eSJan Kara 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
285c8afe684SRob Clark 			pfn, pfn << PAGE_SHIFT);
286c8afe684SRob Clark 
287a5f74ec7SSouptick Joarder 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
288c8afe684SRob Clark out_unlock:
289a6ae74c9SRob Clark 	msm_gem_unlock(obj);
290c8afe684SRob Clark out:
291a5f74ec7SSouptick Joarder 	return ret;
292c8afe684SRob Clark }
293c8afe684SRob Clark 
294c8afe684SRob Clark /* get mmap offset */
295c8afe684SRob Clark static uint64_t mmap_offset(struct drm_gem_object *obj)
296c8afe684SRob Clark {
297c8afe684SRob Clark 	struct drm_device *dev = obj->dev;
298c8afe684SRob Clark 	int ret;
299c8afe684SRob Clark 
30090643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
301c8afe684SRob Clark 
302c8afe684SRob Clark 	/* Make it mmapable */
303c8afe684SRob Clark 	ret = drm_gem_create_mmap_offset(obj);
304c8afe684SRob Clark 
305c8afe684SRob Clark 	if (ret) {
3066a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
307c8afe684SRob Clark 		return 0;
308c8afe684SRob Clark 	}
309c8afe684SRob Clark 
310c8afe684SRob Clark 	return drm_vma_node_offset_addr(&obj->vma_node);
311c8afe684SRob Clark }
312c8afe684SRob Clark 
313c8afe684SRob Clark uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
314c8afe684SRob Clark {
315c8afe684SRob Clark 	uint64_t offset;
3160e08270aSSushmita Susheelendra 
317a6ae74c9SRob Clark 	msm_gem_lock(obj);
318c8afe684SRob Clark 	offset = mmap_offset(obj);
319a6ae74c9SRob Clark 	msm_gem_unlock(obj);
320c8afe684SRob Clark 	return offset;
321c8afe684SRob Clark }
322c8afe684SRob Clark 
3234b85f7f5SRob Clark static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
3244b85f7f5SRob Clark 		struct msm_gem_address_space *aspace)
3254b85f7f5SRob Clark {
3264b85f7f5SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3274b85f7f5SRob Clark 	struct msm_gem_vma *vma;
3284b85f7f5SRob Clark 
32990643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3300e08270aSSushmita Susheelendra 
3314b85f7f5SRob Clark 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
3324b85f7f5SRob Clark 	if (!vma)
3334b85f7f5SRob Clark 		return ERR_PTR(-ENOMEM);
3344b85f7f5SRob Clark 
3354b85f7f5SRob Clark 	vma->aspace = aspace;
3364b85f7f5SRob Clark 
3374b85f7f5SRob Clark 	list_add_tail(&vma->list, &msm_obj->vmas);
3384b85f7f5SRob Clark 
3394b85f7f5SRob Clark 	return vma;
3404b85f7f5SRob Clark }
3414b85f7f5SRob Clark 
3424b85f7f5SRob Clark static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
3434b85f7f5SRob Clark 		struct msm_gem_address_space *aspace)
3444b85f7f5SRob Clark {
3454b85f7f5SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3464b85f7f5SRob Clark 	struct msm_gem_vma *vma;
3474b85f7f5SRob Clark 
34890643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3494b85f7f5SRob Clark 
3504b85f7f5SRob Clark 	list_for_each_entry(vma, &msm_obj->vmas, list) {
3514b85f7f5SRob Clark 		if (vma->aspace == aspace)
3524b85f7f5SRob Clark 			return vma;
3534b85f7f5SRob Clark 	}
3544b85f7f5SRob Clark 
3554b85f7f5SRob Clark 	return NULL;
3564b85f7f5SRob Clark }
3574b85f7f5SRob Clark 
3584b85f7f5SRob Clark static void del_vma(struct msm_gem_vma *vma)
3594b85f7f5SRob Clark {
3604b85f7f5SRob Clark 	if (!vma)
3614b85f7f5SRob Clark 		return;
3624b85f7f5SRob Clark 
3634b85f7f5SRob Clark 	list_del(&vma->list);
3644b85f7f5SRob Clark 	kfree(vma);
3654b85f7f5SRob Clark }
3664b85f7f5SRob Clark 
367*37c68900SLee Jones /*
36820d0ae2fSRob Clark  * If close is true, this also closes the VMA (releasing the allocated
36920d0ae2fSRob Clark  * iova range) in addition to removing the iommu mapping.  In the eviction
37020d0ae2fSRob Clark  * case (!close), we keep the iova allocated, but only remove the iommu
37120d0ae2fSRob Clark  * mapping.
37220d0ae2fSRob Clark  */
3734fe5f65eSRob Clark static void
37420d0ae2fSRob Clark put_iova_spaces(struct drm_gem_object *obj, bool close)
3754fe5f65eSRob Clark {
3764fe5f65eSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3779b73bde3SIskren Chernev 	struct msm_gem_vma *vma;
3784fe5f65eSRob Clark 
37990643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3804fe5f65eSRob Clark 
3819b73bde3SIskren Chernev 	list_for_each_entry(vma, &msm_obj->vmas, list) {
382d67f1b6dSBrian Masney 		if (vma->aspace) {
3837ad0e8cfSJordan Crouse 			msm_gem_purge_vma(vma->aspace, vma);
38420d0ae2fSRob Clark 			if (close)
3857ad0e8cfSJordan Crouse 				msm_gem_close_vma(vma->aspace, vma);
386d67f1b6dSBrian Masney 		}
3879b73bde3SIskren Chernev 	}
3889b73bde3SIskren Chernev }
3899b73bde3SIskren Chernev 
3909b73bde3SIskren Chernev /* Called with msm_obj locked */
3919b73bde3SIskren Chernev static void
3929b73bde3SIskren Chernev put_iova_vmas(struct drm_gem_object *obj)
3934fe5f65eSRob Clark {
3944fe5f65eSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3954fe5f65eSRob Clark 	struct msm_gem_vma *vma, *tmp;
3964fe5f65eSRob Clark 
39790643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3984fe5f65eSRob Clark 
3994fe5f65eSRob Clark 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
4004b85f7f5SRob Clark 		del_vma(vma);
4014fe5f65eSRob Clark 	}
4024fe5f65eSRob Clark }
4034fe5f65eSRob Clark 
4048117e5e5SRob Clark static int get_iova_locked(struct drm_gem_object *obj,
405d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova,
406d3b8877eSJonathan Marek 		u64 range_start, u64 range_end)
407c8afe684SRob Clark {
4084b85f7f5SRob Clark 	struct msm_gem_vma *vma;
409c8afe684SRob Clark 	int ret = 0;
410c8afe684SRob Clark 
41190643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
412cb1e3818SRob Clark 
4134b85f7f5SRob Clark 	vma = lookup_vma(obj, aspace);
414871d812aSRob Clark 
4154b85f7f5SRob Clark 	if (!vma) {
4164b85f7f5SRob Clark 		vma = add_vma(obj, aspace);
417c0ee9794SJordan Crouse 		if (IS_ERR(vma))
418c0ee9794SJordan Crouse 			return PTR_ERR(vma);
4194b85f7f5SRob Clark 
420d3b8877eSJonathan Marek 		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
421d3b8877eSJonathan Marek 			range_start, range_end);
422c0ee9794SJordan Crouse 		if (ret) {
423c0ee9794SJordan Crouse 			del_vma(vma);
424c0ee9794SJordan Crouse 			return ret;
425c8afe684SRob Clark 		}
4264b85f7f5SRob Clark 	}
4274b85f7f5SRob Clark 
4284b85f7f5SRob Clark 	*iova = vma->iova;
4294b85f7f5SRob Clark 	return 0;
430c0ee9794SJordan Crouse }
4314b85f7f5SRob Clark 
432c0ee9794SJordan Crouse static int msm_gem_pin_iova(struct drm_gem_object *obj,
433c0ee9794SJordan Crouse 		struct msm_gem_address_space *aspace)
434c0ee9794SJordan Crouse {
435c0ee9794SJordan Crouse 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
436c0ee9794SJordan Crouse 	struct msm_gem_vma *vma;
437c0ee9794SJordan Crouse 	struct page **pages;
43864fcbde7SRob Clark 	int ret, prot = IOMMU_READ;
439bbc2cd07SRob Clark 
440bbc2cd07SRob Clark 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
441bbc2cd07SRob Clark 		prot |= IOMMU_WRITE;
442c0ee9794SJordan Crouse 
4430b462d7aSJonathan Marek 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
4440b462d7aSJonathan Marek 		prot |= IOMMU_PRIV;
4450b462d7aSJonathan Marek 
446d12e3390SJonathan Marek 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
447d12e3390SJonathan Marek 		prot |= IOMMU_CACHE;
448d12e3390SJonathan Marek 
44990643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
450c0ee9794SJordan Crouse 
45190643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
452c0ee9794SJordan Crouse 		return -EBUSY;
453c0ee9794SJordan Crouse 
454c0ee9794SJordan Crouse 	vma = lookup_vma(obj, aspace);
45590643a24SRob Clark 	if (GEM_WARN_ON(!vma))
456c0ee9794SJordan Crouse 		return -EINVAL;
457c0ee9794SJordan Crouse 
458c0ee9794SJordan Crouse 	pages = get_pages(obj);
459c0ee9794SJordan Crouse 	if (IS_ERR(pages))
460c0ee9794SJordan Crouse 		return PTR_ERR(pages);
461c0ee9794SJordan Crouse 
46264fcbde7SRob Clark 	ret = msm_gem_map_vma(aspace, vma, prot,
463bbc2cd07SRob Clark 			msm_obj->sgt, obj->size >> PAGE_SHIFT);
46464fcbde7SRob Clark 
46564fcbde7SRob Clark 	if (!ret)
46664fcbde7SRob Clark 		msm_obj->pin_count++;
46764fcbde7SRob Clark 
46864fcbde7SRob Clark 	return ret;
469c0ee9794SJordan Crouse }
470c0ee9794SJordan Crouse 
471e4b87d22SRob Clark static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
472d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova,
473d3b8877eSJonathan Marek 		u64 range_start, u64 range_end)
474c0ee9794SJordan Crouse {
475c0ee9794SJordan Crouse 	u64 local;
476c0ee9794SJordan Crouse 	int ret;
477c0ee9794SJordan Crouse 
47890643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
479c0ee9794SJordan Crouse 
4808117e5e5SRob Clark 	ret = get_iova_locked(obj, aspace, &local,
481d3b8877eSJonathan Marek 		range_start, range_end);
482c0ee9794SJordan Crouse 
483c0ee9794SJordan Crouse 	if (!ret)
484c0ee9794SJordan Crouse 		ret = msm_gem_pin_iova(obj, aspace);
485c0ee9794SJordan Crouse 
486c0ee9794SJordan Crouse 	if (!ret)
487c0ee9794SJordan Crouse 		*iova = local;
488c0ee9794SJordan Crouse 
489c8afe684SRob Clark 	return ret;
490c8afe684SRob Clark }
491c8afe684SRob Clark 
492e4b87d22SRob Clark /*
493e4b87d22SRob Clark  * get iova and pin it. Should have a matching put.
494e4b87d22SRob Clark  * Limits iova to specified range (in pages).
495e4b87d22SRob Clark  */
496e4b87d22SRob Clark int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
497e4b87d22SRob Clark 		struct msm_gem_address_space *aspace, uint64_t *iova,
498e4b87d22SRob Clark 		u64 range_start, u64 range_end)
499e4b87d22SRob Clark {
500e4b87d22SRob Clark 	int ret;
501e4b87d22SRob Clark 
502e4b87d22SRob Clark 	msm_gem_lock(obj);
503e4b87d22SRob Clark 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
504e4b87d22SRob Clark 	msm_gem_unlock(obj);
505e4b87d22SRob Clark 
506e4b87d22SRob Clark 	return ret;
507e4b87d22SRob Clark }
508e4b87d22SRob Clark 
509e4b87d22SRob Clark int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
510e4b87d22SRob Clark 		struct msm_gem_address_space *aspace, uint64_t *iova)
511e4b87d22SRob Clark {
512e4b87d22SRob Clark 	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
513e4b87d22SRob Clark }
514e4b87d22SRob Clark 
515d3b8877eSJonathan Marek /* get iova and pin it. Should have a matching put */
516d3b8877eSJonathan Marek int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
517d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova)
518d3b8877eSJonathan Marek {
519d3b8877eSJonathan Marek 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
520d3b8877eSJonathan Marek }
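
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * pinning a BO for GPU access with the helpers above, and dropping the
 * pin with msm_gem_unpin_iova() (defined below) when done:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	// ... emit 'iova' into the cmdstream and submit ...
 *	msm_gem_unpin_iova(obj, aspace);
 */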
521d3b8877eSJonathan Marek 
5227ad0e8cfSJordan Crouse /*
5237ad0e8cfSJordan Crouse  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
5247ad0e8cfSJordan Crouse  * valid for the life of the object
5247ad0e8cfSJordan Crouse  * valid for the life of the object.
5269fe041f6SJordan Crouse int msm_gem_get_iova(struct drm_gem_object *obj,
5279fe041f6SJordan Crouse 		struct msm_gem_address_space *aspace, uint64_t *iova)
5289fe041f6SJordan Crouse {
5299fe041f6SJordan Crouse 	int ret;
5309fe041f6SJordan Crouse 
531a6ae74c9SRob Clark 	msm_gem_lock(obj);
5328117e5e5SRob Clark 	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
533a6ae74c9SRob Clark 	msm_gem_unlock(obj);
5349fe041f6SJordan Crouse 
5359fe041f6SJordan Crouse 	return ret;
5369fe041f6SJordan Crouse }
5379fe041f6SJordan Crouse 
5382638d90aSRob Clark /* get iova without taking a reference, used in places where you have
5399fe041f6SJordan Crouse  * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
5402638d90aSRob Clark  */
5418bdcd949SRob Clark uint64_t msm_gem_iova(struct drm_gem_object *obj,
5428bdcd949SRob Clark 		struct msm_gem_address_space *aspace)
5432638d90aSRob Clark {
5444b85f7f5SRob Clark 	struct msm_gem_vma *vma;
5454b85f7f5SRob Clark 
546a6ae74c9SRob Clark 	msm_gem_lock(obj);
5474b85f7f5SRob Clark 	vma = lookup_vma(obj, aspace);
548a6ae74c9SRob Clark 	msm_gem_unlock(obj);
54990643a24SRob Clark 	GEM_WARN_ON(!vma);
5504b85f7f5SRob Clark 
5514b85f7f5SRob Clark 	return vma ? vma->iova : 0;
5522638d90aSRob Clark }
5532638d90aSRob Clark 
5547ad0e8cfSJordan Crouse /*
555e4b87d22SRob Clark  * Locked variant of msm_gem_unpin_iova()
556e4b87d22SRob Clark  */
557e4b87d22SRob Clark void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
558e4b87d22SRob Clark 		struct msm_gem_address_space *aspace)
559e4b87d22SRob Clark {
56064fcbde7SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
561e4b87d22SRob Clark 	struct msm_gem_vma *vma;
562e4b87d22SRob Clark 
56390643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
564e4b87d22SRob Clark 
565e4b87d22SRob Clark 	vma = lookup_vma(obj, aspace);
566e4b87d22SRob Clark 
56764fcbde7SRob Clark 	if (!GEM_WARN_ON(!vma)) {
568e4b87d22SRob Clark 		msm_gem_unmap_vma(aspace, vma);
56964fcbde7SRob Clark 
57064fcbde7SRob Clark 		msm_obj->pin_count--;
57164fcbde7SRob Clark 		GEM_WARN_ON(msm_obj->pin_count < 0);
57264fcbde7SRob Clark 
57364fcbde7SRob Clark 		update_inactive(msm_obj);
57464fcbde7SRob Clark 	}
575e4b87d22SRob Clark }
576e4b87d22SRob Clark 
577e4b87d22SRob Clark /*
5787ad0e8cfSJordan Crouse  * Unpin an iova by updating the reference counts. The memory isn't actually
5797ad0e8cfSJordan Crouse  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
5807ad0e8cfSJordan Crouse  * to get rid of it
5817ad0e8cfSJordan Crouse  */
5827ad0e8cfSJordan Crouse void msm_gem_unpin_iova(struct drm_gem_object *obj,
5838bdcd949SRob Clark 		struct msm_gem_address_space *aspace)
584c8afe684SRob Clark {
585a6ae74c9SRob Clark 	msm_gem_lock(obj);
586e4b87d22SRob Clark 	msm_gem_unpin_iova_locked(obj, aspace);
587a6ae74c9SRob Clark 	msm_gem_unlock(obj);
588c8afe684SRob Clark }
589c8afe684SRob Clark 
590c8afe684SRob Clark int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
591c8afe684SRob Clark 		struct drm_mode_create_dumb *args)
592c8afe684SRob Clark {
593c8afe684SRob Clark 	args->pitch = align_pitch(args->width, args->bpp);
594c8afe684SRob Clark 	args->size  = PAGE_ALIGN(args->pitch * args->height);
595c8afe684SRob Clark 	return msm_gem_new_handle(dev, file, args->size,
5960815d774SJordan Crouse 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
597c8afe684SRob Clark }
598c8afe684SRob Clark 
599c8afe684SRob Clark int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
600c8afe684SRob Clark 		uint32_t handle, uint64_t *offset)
601c8afe684SRob Clark {
602c8afe684SRob Clark 	struct drm_gem_object *obj;
603c8afe684SRob Clark 	int ret = 0;
604c8afe684SRob Clark 
605c8afe684SRob Clark 	/* GEM does all our handle to object mapping */
606a8ad0bd8SChris Wilson 	obj = drm_gem_object_lookup(file, handle);
607c8afe684SRob Clark 	if (obj == NULL) {
608c8afe684SRob Clark 		ret = -ENOENT;
609c8afe684SRob Clark 		goto fail;
610c8afe684SRob Clark 	}
611c8afe684SRob Clark 
612c8afe684SRob Clark 	*offset = msm_gem_mmap_offset(obj);
613c8afe684SRob Clark 
614f7d33950SEmil Velikov 	drm_gem_object_put(obj);
615c8afe684SRob Clark 
616c8afe684SRob Clark fail:
617c8afe684SRob Clark 	return ret;
618c8afe684SRob Clark }
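
/*
 * Illustrative userspace sketch (assumptions: a valid DRM fd and the
 * generic dumb-buffer UAPI; error handling omitted), showing how the
 * fake mmap offset produced above is consumed:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = {};
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 */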
619c8afe684SRob Clark 
620fad33f4bSRob Clark static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
621c8afe684SRob Clark {
6220e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
6230e08270aSSushmita Susheelendra 	int ret = 0;
6240e08270aSSushmita Susheelendra 
62590643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
626e4b87d22SRob Clark 
6278b6b7d84SDaniel Vetter 	if (obj->import_attach)
6288b6b7d84SDaniel Vetter 		return ERR_PTR(-ENODEV);
6298b6b7d84SDaniel Vetter 
63090643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv > madv)) {
6316a41da17SMamta Shukla 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
632fad33f4bSRob Clark 			msm_obj->madv, madv);
6330e08270aSSushmita Susheelendra 		return ERR_PTR(-EBUSY);
634c8afe684SRob Clark 	}
635c8afe684SRob Clark 
6360e08270aSSushmita Susheelendra 	/* increment vmap_count *before* the vmap() call, so the shrinker can
637a6ae74c9SRob Clark 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
6380e08270aSSushmita Susheelendra 	 * This guarantees that we won't try to msm_gem_vunmap() this
6390e08270aSSushmita Susheelendra 	 * same object from within the vmap() call (while we already
640a6ae74c9SRob Clark 	 * hold msm_obj lock)
6410e08270aSSushmita Susheelendra 	 */
6420e08270aSSushmita Susheelendra 	msm_obj->vmap_count++;
6430e08270aSSushmita Susheelendra 
6440e08270aSSushmita Susheelendra 	if (!msm_obj->vaddr) {
6450e08270aSSushmita Susheelendra 		struct page **pages = get_pages(obj);
6460e08270aSSushmita Susheelendra 		if (IS_ERR(pages)) {
6470e08270aSSushmita Susheelendra 			ret = PTR_ERR(pages);
6480e08270aSSushmita Susheelendra 			goto fail;
6490e08270aSSushmita Susheelendra 		}
6500e08270aSSushmita Susheelendra 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
651af9b3547SJonathan Marek 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
6520e08270aSSushmita Susheelendra 		if (msm_obj->vaddr == NULL) {
6530e08270aSSushmita Susheelendra 			ret = -ENOMEM;
6540e08270aSSushmita Susheelendra 			goto fail;
6550e08270aSSushmita Susheelendra 		}
65610f76165SRob Clark 
65710f76165SRob Clark 		update_inactive(msm_obj);
6580e08270aSSushmita Susheelendra 	}
6590e08270aSSushmita Susheelendra 
6600e08270aSSushmita Susheelendra 	return msm_obj->vaddr;
6610e08270aSSushmita Susheelendra 
6620e08270aSSushmita Susheelendra fail:
663e1e9db2cSRob Clark 	msm_obj->vmap_count--;
6640e08270aSSushmita Susheelendra 	return ERR_PTR(ret);
66518f23049SRob Clark }
66618f23049SRob Clark 
667e4b87d22SRob Clark void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
668e4b87d22SRob Clark {
669e4b87d22SRob Clark 	return get_vaddr(obj, MSM_MADV_WILLNEED);
670e4b87d22SRob Clark }
671e4b87d22SRob Clark 
672fad33f4bSRob Clark void *msm_gem_get_vaddr(struct drm_gem_object *obj)
673fad33f4bSRob Clark {
674e4b87d22SRob Clark 	void *ret;
675e4b87d22SRob Clark 
676e4b87d22SRob Clark 	msm_gem_lock(obj);
677e4b87d22SRob Clark 	ret = msm_gem_get_vaddr_locked(obj);
678e4b87d22SRob Clark 	msm_gem_unlock(obj);
679e4b87d22SRob Clark 
680e4b87d22SRob Clark 	return ret;
681fad33f4bSRob Clark }
682fad33f4bSRob Clark 
683fad33f4bSRob Clark /*
684fad33f4bSRob Clark  * Don't use this!  It is for the very special case of dumping
685fad33f4bSRob Clark  * submits from GPU hangs or faults, where the bo may already
686fad33f4bSRob Clark  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
687fad33f4bSRob Clark  * active list.
688fad33f4bSRob Clark  */
689fad33f4bSRob Clark void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
690fad33f4bSRob Clark {
691fad33f4bSRob Clark 	return get_vaddr(obj, __MSM_MADV_PURGED);
692fad33f4bSRob Clark }
693fad33f4bSRob Clark 
694e4b87d22SRob Clark void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
69518f23049SRob Clark {
6960e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
6970e08270aSSushmita Susheelendra 
69890643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
69990643a24SRob Clark 	GEM_WARN_ON(msm_obj->vmap_count < 1);
700e4b87d22SRob Clark 
7010e08270aSSushmita Susheelendra 	msm_obj->vmap_count--;
7024cd33c48SRob Clark }
7030e08270aSSushmita Susheelendra 
7040e08270aSSushmita Susheelendra void msm_gem_put_vaddr(struct drm_gem_object *obj)
7054cd33c48SRob Clark {
706a6ae74c9SRob Clark 	msm_gem_lock(obj);
707e4b87d22SRob Clark 	msm_gem_put_vaddr_locked(obj);
708a6ae74c9SRob Clark 	msm_gem_unlock(obj);
7094cd33c48SRob Clark }
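
/*
 * Illustrative sketch (not part of the driver): CPU access via the kernel
 * vmap, bracketed by get/put so the shrinker sees a non-zero vmap_count
 * while the mapping is in use ('data' and 'len' are hypothetical):
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	msm_gem_put_vaddr(obj);
 */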
7104cd33c48SRob Clark 
7114cd33c48SRob Clark /* Update madvise status; returns true if not purged, else
7124cd33c48SRob Clark  * false or -errno.
7134cd33c48SRob Clark  */
7144cd33c48SRob Clark int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
7154cd33c48SRob Clark {
7164cd33c48SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
71768209390SRob Clark 
718a6ae74c9SRob Clark 	msm_gem_lock(obj);
719c8afe684SRob Clark 
720c8afe684SRob Clark 	if (msm_obj->madv != __MSM_MADV_PURGED)
721c8afe684SRob Clark 		msm_obj->madv = madv;
722c8afe684SRob Clark 
7230e08270aSSushmita Susheelendra 	madv = msm_obj->madv;
7240e08270aSSushmita Susheelendra 
7253edfa30fSRob Clark 	/* If the obj is inactive, we might need to move it
7263edfa30fSRob Clark 	 * between inactive lists
7273edfa30fSRob Clark 	 */
7283edfa30fSRob Clark 	if (msm_obj->active_count == 0)
7293edfa30fSRob Clark 		update_inactive(msm_obj);
7303edfa30fSRob Clark 
731a6ae74c9SRob Clark 	msm_gem_unlock(obj);
7320e08270aSSushmita Susheelendra 
7330e08270aSSushmita Susheelendra 	return (madv != __MSM_MADV_PURGED);
734c8afe684SRob Clark }
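
/*
 * Illustrative sketch (not part of the driver): how a hypothetical caller
 * uses the return value.  DONTNEED volunteers the backing pages to the
 * shrinker; WILLNEED on reuse reports whether they survived:
 *
 *	msm_gem_madvise(obj, MSM_MADV_DONTNEED);        // park in a cache
 *	// ...
 *	if (msm_gem_madvise(obj, MSM_MADV_WILLNEED) == 0) {
 *		// purged in the meantime: contents lost, reinitialize
 *	}
 */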
735c8afe684SRob Clark 
736599089c6SRob Clark void msm_gem_purge(struct drm_gem_object *obj)
73768209390SRob Clark {
73868209390SRob Clark 	struct drm_device *dev = obj->dev;
73968209390SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
74068209390SRob Clark 
74181d4d597SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
74290643a24SRob Clark 	GEM_WARN_ON(!is_purgeable(msm_obj));
74368209390SRob Clark 
74420d0ae2fSRob Clark 	/* Get rid of any iommu mapping(s): */
74520d0ae2fSRob Clark 	put_iova_spaces(obj, true);
7460e08270aSSushmita Susheelendra 
747599089c6SRob Clark 	msm_gem_vunmap(obj);
74868209390SRob Clark 
74981d4d597SRob Clark 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
75081d4d597SRob Clark 
75168209390SRob Clark 	put_pages(obj);
75268209390SRob Clark 
7539b73bde3SIskren Chernev 	put_iova_vmas(obj);
7549b73bde3SIskren Chernev 
75568209390SRob Clark 	msm_obj->madv = __MSM_MADV_PURGED;
75625ed38b3SRob Clark 	update_inactive(msm_obj);
75768209390SRob Clark 
75868209390SRob Clark 	drm_gem_free_mmap_offset(obj);
75968209390SRob Clark 
76068209390SRob Clark 	/* Our goal here is to return as much of the memory as
76168209390SRob Clark 	 * possible back to the system, as we are called from OOM.
76268209390SRob Clark 	 * To do this we must instruct the shmfs to drop all of its
76368209390SRob Clark 	 * backing pages, *now*.
76468209390SRob Clark 	 */
76568209390SRob Clark 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
76668209390SRob Clark 
76768209390SRob Clark 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
76868209390SRob Clark 			0, (loff_t)-1);
76968209390SRob Clark }
77068209390SRob Clark 
771*37c68900SLee Jones /*
77263f17ef8SRob Clark  * Unpin the backing pages and make them available to be swapped out.
77363f17ef8SRob Clark  */
77463f17ef8SRob Clark void msm_gem_evict(struct drm_gem_object *obj)
77563f17ef8SRob Clark {
77663f17ef8SRob Clark 	struct drm_device *dev = obj->dev;
77763f17ef8SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
77863f17ef8SRob Clark 
77963f17ef8SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
78063f17ef8SRob Clark 	GEM_WARN_ON(is_unevictable(msm_obj));
78163f17ef8SRob Clark 	GEM_WARN_ON(!msm_obj->evictable);
78263f17ef8SRob Clark 	GEM_WARN_ON(msm_obj->active_count);
78363f17ef8SRob Clark 
78463f17ef8SRob Clark 	/* Get rid of any iommu mapping(s): */
78563f17ef8SRob Clark 	put_iova_spaces(obj, false);
78663f17ef8SRob Clark 
78763f17ef8SRob Clark 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
78863f17ef8SRob Clark 
78963f17ef8SRob Clark 	put_pages(obj);
79063f17ef8SRob Clark 
79163f17ef8SRob Clark 	update_inactive(msm_obj);
79263f17ef8SRob Clark }
79363f17ef8SRob Clark 
794599089c6SRob Clark void msm_gem_vunmap(struct drm_gem_object *obj)
795e1e9db2cSRob Clark {
796e1e9db2cSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
797e1e9db2cSRob Clark 
79890643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
7990e08270aSSushmita Susheelendra 
80090643a24SRob Clark 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
801e1e9db2cSRob Clark 		return;
802e1e9db2cSRob Clark 
803e1e9db2cSRob Clark 	vunmap(msm_obj->vaddr);
804e1e9db2cSRob Clark 	msm_obj->vaddr = NULL;
805e1e9db2cSRob Clark }
806e1e9db2cSRob Clark 
807b6295f9aSRob Clark /* must be called before _move_to_active().. */
808b6295f9aSRob Clark int msm_gem_sync_object(struct drm_gem_object *obj,
809b6295f9aSRob Clark 		struct msm_fence_context *fctx, bool exclusive)
810b6295f9aSRob Clark {
81152791eeeSChristian König 	struct dma_resv_list *fobj;
812f54d1867SChris Wilson 	struct dma_fence *fence;
813b6295f9aSRob Clark 	int i, ret;
814b6295f9aSRob Clark 
81552791eeeSChristian König 	fobj = dma_resv_get_list(obj->resv);
816b6295f9aSRob Clark 	if (!fobj || (fobj->shared_count == 0)) {
81752791eeeSChristian König 		fence = dma_resv_get_excl(obj->resv);
818b6295f9aSRob Clark 		/* don't need to wait on our own fences, since ring is fifo */
819b6295f9aSRob Clark 		if (fence && (fence->context != fctx->context)) {
820f54d1867SChris Wilson 			ret = dma_fence_wait(fence, true);
821b6295f9aSRob Clark 			if (ret)
822b6295f9aSRob Clark 				return ret;
823b6295f9aSRob Clark 		}
824b6295f9aSRob Clark 	}
825b6295f9aSRob Clark 
826b6295f9aSRob Clark 	if (!exclusive || !fobj)
827b6295f9aSRob Clark 		return 0;
828b6295f9aSRob Clark 
829b6295f9aSRob Clark 	for (i = 0; i < fobj->shared_count; i++) {
830b6295f9aSRob Clark 		fence = rcu_dereference_protected(fobj->shared[i],
83152791eeeSChristian König 						dma_resv_held(obj->resv));
832b6295f9aSRob Clark 		if (fence->context != fctx->context) {
833f54d1867SChris Wilson 			ret = dma_fence_wait(fence, true);
834b6295f9aSRob Clark 			if (ret)
835b6295f9aSRob Clark 				return ret;
836b6295f9aSRob Clark 		}
837b6295f9aSRob Clark 	}
838b6295f9aSRob Clark 
839b6295f9aSRob Clark 	return 0;
840b6295f9aSRob Clark }
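
/*
 * Illustrative sketch (not part of the driver): a hypothetical submit path
 * using the helper above for implicit sync before marking the BO active,
 * with the object lock held ('fctx' being the ring's fence context is an
 * assumption here):
 *
 *	ret = msm_gem_sync_object(obj, fctx, write);
 *	if (ret)
 *		return ret;
 *	msm_gem_active_get(obj, gpu);
 */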
841b6295f9aSRob Clark 
8429d8baa2bSAkhil P Oommen void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
8437198e6b0SRob Clark {
8447198e6b0SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
845d984457bSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
846d984457bSRob Clark 
847d984457bSRob Clark 	might_sleep();
84890643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
84990643a24SRob Clark 	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
85090643a24SRob Clark 	GEM_WARN_ON(msm_obj->dontneed);
85164fcbde7SRob Clark 	GEM_WARN_ON(!msm_obj->sgt);
8529d8baa2bSAkhil P Oommen 
853ab5c54cbSRob Clark 	if (msm_obj->active_count++ == 0) {
854d984457bSRob Clark 		mutex_lock(&priv->mm_lock);
85564fcbde7SRob Clark 		if (msm_obj->evictable)
85664fcbde7SRob Clark 			mark_unevictable(msm_obj);
857cc8a4d5aSRob Clark 		list_del(&msm_obj->mm_list);
8587198e6b0SRob Clark 		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
859d984457bSRob Clark 		mutex_unlock(&priv->mm_lock);
8607198e6b0SRob Clark 	}
8619d8baa2bSAkhil P Oommen }
8627198e6b0SRob Clark 
8639d8baa2bSAkhil P Oommen void msm_gem_active_put(struct drm_gem_object *obj)
8647198e6b0SRob Clark {
8657198e6b0SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
8667198e6b0SRob Clark 
867d984457bSRob Clark 	might_sleep();
86890643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
8697198e6b0SRob Clark 
870ab5c54cbSRob Clark 	if (--msm_obj->active_count == 0) {
8713edfa30fSRob Clark 		update_inactive(msm_obj);
8727198e6b0SRob Clark 	}
8739d8baa2bSAkhil P Oommen }
8747198e6b0SRob Clark 
8753edfa30fSRob Clark static void update_inactive(struct msm_gem_object *msm_obj)
8763edfa30fSRob Clark {
8773edfa30fSRob Clark 	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
8783edfa30fSRob Clark 
87964fcbde7SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
88064fcbde7SRob Clark 
88164fcbde7SRob Clark 	if (msm_obj->active_count != 0)
88264fcbde7SRob Clark 		return;
88364fcbde7SRob Clark 
8843edfa30fSRob Clark 	mutex_lock(&priv->mm_lock);
8853edfa30fSRob Clark 
886cc8a4d5aSRob Clark 	if (msm_obj->dontneed)
8870054eeb7SRob Clark 		mark_unpurgeable(msm_obj);
88864fcbde7SRob Clark 	if (msm_obj->evictable)
88964fcbde7SRob Clark 		mark_unevictable(msm_obj);
890cc8a4d5aSRob Clark 
891cc8a4d5aSRob Clark 	list_del(&msm_obj->mm_list);
89264fcbde7SRob Clark 	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
8933edfa30fSRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
89464fcbde7SRob Clark 		mark_evictable(msm_obj);
895cc8a4d5aSRob Clark 	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
8963edfa30fSRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
8970054eeb7SRob Clark 		mark_purgeable(msm_obj);
898cc8a4d5aSRob Clark 	} else {
89964fcbde7SRob Clark 		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
90064fcbde7SRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
901cc8a4d5aSRob Clark 	}
9023edfa30fSRob Clark 
9033edfa30fSRob Clark 	mutex_unlock(&priv->mm_lock);
9043edfa30fSRob Clark }
9053edfa30fSRob Clark 
906ba00c3f2SRob Clark int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
907ba00c3f2SRob Clark {
908b6295f9aSRob Clark 	bool write = !!(op & MSM_PREP_WRITE);
909f755e227SChris Wilson 	unsigned long remain =
910f755e227SChris Wilson 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
911f755e227SChris Wilson 	long ret;
912b6295f9aSRob Clark 
91352791eeeSChristian König 	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
914f755e227SChris Wilson 						  true,  remain);
915f755e227SChris Wilson 	if (ret == 0)
916f755e227SChris Wilson 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
917f755e227SChris Wilson 	else if (ret < 0)
918f755e227SChris Wilson 		return ret;
919ba00c3f2SRob Clark 
9207198e6b0SRob Clark 	/* TODO cache maintenance */
9217198e6b0SRob Clark 
922b6295f9aSRob Clark 	return 0;
9237198e6b0SRob Clark }
9247198e6b0SRob Clark 
9257198e6b0SRob Clark int msm_gem_cpu_fini(struct drm_gem_object *obj)
9267198e6b0SRob Clark {
9277198e6b0SRob Clark 	/* TODO cache maintenance */
928c8afe684SRob Clark 	return 0;
929c8afe684SRob Clark }
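
/*
 * Illustrative sketch (not part of the driver): bracketing CPU access with
 * prep/fini.  'timeout' is an absolute ktime_t; passing MSM_PREP_NOSYNC in
 * 'op' polls instead of blocking:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;     // -EBUSY or -ETIMEDOUT while still busy
 *	// ... CPU reads/writes ...
 *	msm_gem_cpu_fini(obj);
 */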
930c8afe684SRob Clark 
931c8afe684SRob Clark #ifdef CONFIG_DEBUG_FS
932f54d1867SChris Wilson static void describe_fence(struct dma_fence *fence, const char *type,
933b6295f9aSRob Clark 		struct seq_file *m)
934b6295f9aSRob Clark {
935f54d1867SChris Wilson 	if (!dma_fence_is_signaled(fence))
936a3115621SDave Airlie 		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
937b6295f9aSRob Clark 				fence->ops->get_driver_name(fence),
938b6295f9aSRob Clark 				fence->ops->get_timeline_name(fence),
939b6295f9aSRob Clark 				fence->seqno);
940b6295f9aSRob Clark }
941b6295f9aSRob Clark 
942528107c8SRob Clark void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
943528107c8SRob Clark 		struct msm_gem_stats *stats)
944c8afe684SRob Clark {
945c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
94652791eeeSChristian König 	struct dma_resv *robj = obj->resv;
94752791eeeSChristian König 	struct dma_resv_list *fobj;
948f54d1867SChris Wilson 	struct dma_fence *fence;
9494b85f7f5SRob Clark 	struct msm_gem_vma *vma;
950c8afe684SRob Clark 	uint64_t off = drm_vma_node_start(&obj->vma_node);
9514cd33c48SRob Clark 	const char *madv;
952c8afe684SRob Clark 
953a6ae74c9SRob Clark 	msm_gem_lock(obj);
954b6295f9aSRob Clark 
955528107c8SRob Clark 	stats->all.count++;
956528107c8SRob Clark 	stats->all.size += obj->size;
957528107c8SRob Clark 
958528107c8SRob Clark 	if (is_active(msm_obj)) {
959528107c8SRob Clark 		stats->active.count++;
960528107c8SRob Clark 		stats->active.size += obj->size;
961528107c8SRob Clark 	}
962528107c8SRob Clark 
963f48f3563SRob Clark 	if (msm_obj->pages) {
964f48f3563SRob Clark 		stats->resident.count++;
965f48f3563SRob Clark 		stats->resident.size += obj->size;
966f48f3563SRob Clark 	}
967f48f3563SRob Clark 
9684cd33c48SRob Clark 	switch (msm_obj->madv) {
9694cd33c48SRob Clark 	case __MSM_MADV_PURGED:
970528107c8SRob Clark 		stats->purged.count++;
971528107c8SRob Clark 		stats->purged.size += obj->size;
9724cd33c48SRob Clark 		madv = " purged";
9734cd33c48SRob Clark 		break;
9744cd33c48SRob Clark 	case MSM_MADV_DONTNEED:
9750054eeb7SRob Clark 		stats->purgeable.count++;
9760054eeb7SRob Clark 		stats->purgeable.size += obj->size;
9774cd33c48SRob Clark 		madv = " purgeable";
9784cd33c48SRob Clark 		break;
9794cd33c48SRob Clark 	case MSM_MADV_WILLNEED:
9804cd33c48SRob Clark 	default:
9814cd33c48SRob Clark 		madv = "";
9824cd33c48SRob Clark 		break;
9834cd33c48SRob Clark 	}
9844cd33c48SRob Clark 
985575f0485SJordan Crouse 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
9867198e6b0SRob Clark 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
9872c935bc5SPeter Zijlstra 			obj->name, kref_read(&obj->refcount),
988667ce33eSRob Clark 			off, msm_obj->vaddr);
989667ce33eSRob Clark 
9900815d774SJordan Crouse 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
991667ce33eSRob Clark 
992575f0485SJordan Crouse 	if (!list_empty(&msm_obj->vmas)) {
993575f0485SJordan Crouse 
994575f0485SJordan Crouse 		seq_puts(m, "      vmas:");
995575f0485SJordan Crouse 
99625faf2f2SRob Clark 		list_for_each_entry(vma, &msm_obj->vmas, list) {
99725faf2f2SRob Clark 			const char *name, *comm;
99825faf2f2SRob Clark 			if (vma->aspace) {
99925faf2f2SRob Clark 				struct msm_gem_address_space *aspace = vma->aspace;
100025faf2f2SRob Clark 				struct task_struct *task =
100125faf2f2SRob Clark 					get_pid_task(aspace->pid, PIDTYPE_PID);
100225faf2f2SRob Clark 				if (task) {
100325faf2f2SRob Clark 					comm = kstrdup(task->comm, GFP_KERNEL);
100425faf2f2SRob Clark 				} else {
100525faf2f2SRob Clark 					comm = NULL;
100625faf2f2SRob Clark 				}
100725faf2f2SRob Clark 				name = aspace->name;
100825faf2f2SRob Clark 			} else {
100925faf2f2SRob Clark 				name = comm = NULL;
101025faf2f2SRob Clark 			}
101125faf2f2SRob Clark 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
101225faf2f2SRob Clark 				name, comm ? ":" : "", comm ? comm : "",
101325faf2f2SRob Clark 				vma->aspace, vma->iova,
101425faf2f2SRob Clark 				vma->mapped ? "mapped" : "unmapped",
10157ad0e8cfSJordan Crouse 				vma->inuse);
101625faf2f2SRob Clark 			kfree(comm);
101725faf2f2SRob Clark 		}
1018575f0485SJordan Crouse 
1019575f0485SJordan Crouse 		seq_puts(m, "\n");
1020575f0485SJordan Crouse 	}
1021b6295f9aSRob Clark 
1022b6295f9aSRob Clark 	rcu_read_lock();
1023b6295f9aSRob Clark 	fobj = rcu_dereference(robj->fence);
1024b6295f9aSRob Clark 	if (fobj) {
1025b6295f9aSRob Clark 		unsigned int i, shared_count = fobj->shared_count;
1026b6295f9aSRob Clark 
1027b6295f9aSRob Clark 		for (i = 0; i < shared_count; i++) {
1028b6295f9aSRob Clark 			fence = rcu_dereference(fobj->shared[i]);
1029b6295f9aSRob Clark 			describe_fence(fence, "Shared", m);
1030b6295f9aSRob Clark 		}
1031b6295f9aSRob Clark 	}
1032b6295f9aSRob Clark 
1033b6295f9aSRob Clark 	fence = rcu_dereference(robj->fence_excl);
1034b6295f9aSRob Clark 	if (fence)
1035b6295f9aSRob Clark 		describe_fence(fence, "Exclusive", m);
1036b6295f9aSRob Clark 	rcu_read_unlock();
10370e08270aSSushmita Susheelendra 
1038a6ae74c9SRob Clark 	msm_gem_unlock(obj);
1039c8afe684SRob Clark }
1040c8afe684SRob Clark 
1041c8afe684SRob Clark void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1042c8afe684SRob Clark {
1043528107c8SRob Clark 	struct msm_gem_stats stats = {};
1044c8afe684SRob Clark 	struct msm_gem_object *msm_obj;
1045c8afe684SRob Clark 
10460815d774SJordan Crouse 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
10476ed0897cSRob Clark 	list_for_each_entry(msm_obj, list, node) {
1048c8afe684SRob Clark 		struct drm_gem_object *obj = &msm_obj->base;
1049575f0485SJordan Crouse 		seq_puts(m, "   ");
1050528107c8SRob Clark 		msm_gem_describe(obj, m, &stats);
1051c8afe684SRob Clark 	}
1052c8afe684SRob Clark 
1053528107c8SRob Clark 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1054528107c8SRob Clark 			stats.all.count, stats.all.size);
1055528107c8SRob Clark 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1056528107c8SRob Clark 			stats.active.count, stats.active.size);
1057f48f3563SRob Clark 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1058f48f3563SRob Clark 			stats.resident.count, stats.resident.size);
1059f1902c6bSColin Ian King 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
10600054eeb7SRob Clark 			stats.purgeable.count, stats.purgeable.size);
1061528107c8SRob Clark 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1062528107c8SRob Clark 			stats.purged.count, stats.purged.size);
1063c8afe684SRob Clark }
1064c8afe684SRob Clark #endif
1065c8afe684SRob Clark 
1066eecd7fd8SEmil Velikov /* don't call directly!  Use drm_gem_object_put_locked() and friends */
1067c8afe684SRob Clark void msm_gem_free_object(struct drm_gem_object *obj)
1068c8afe684SRob Clark {
1069c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
107048e7f183SKristian H. Kristensen 	struct drm_device *dev = obj->dev;
107148e7f183SKristian H. Kristensen 	struct msm_drm_private *priv = dev->dev_private;
107248e7f183SKristian H. Kristensen 
10736ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
10746ed0897cSRob Clark 	list_del(&msm_obj->node);
10756ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
10766ed0897cSRob Clark 
1077d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
1078cc8a4d5aSRob Clark 	if (msm_obj->dontneed)
10790054eeb7SRob Clark 		mark_unpurgeable(msm_obj);
1080c8afe684SRob Clark 	list_del(&msm_obj->mm_list);
1081d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
1082c8afe684SRob Clark 
1083a6ae74c9SRob Clark 	msm_gem_lock(obj);
1084c8afe684SRob Clark 
1085c8afe684SRob Clark 	/* object should not be on active list: */
108690643a24SRob Clark 	GEM_WARN_ON(is_active(msm_obj));
1087c8afe684SRob Clark 
108820d0ae2fSRob Clark 	put_iova_spaces(obj, true);
1089c8afe684SRob Clark 
109005b84911SRob Clark 	if (obj->import_attach) {
109190643a24SRob Clark 		GEM_WARN_ON(msm_obj->vaddr);
109205b84911SRob Clark 
109305b84911SRob Clark 		/* Don't drop the pages for imported dmabuf, as they are not
109405b84911SRob Clark 		 * ours; just free the array we allocated:
109505b84911SRob Clark 		 */
10962098105eSMichal Hocko 		kvfree(msm_obj->pages);
109705b84911SRob Clark 
109857f04815SRob Clark 		put_iova_vmas(obj);
109957f04815SRob Clark 
11006c0e3ea2SRob Clark 		/* dma_buf_detach() grabs resv lock, so we need to unlock
11016c0e3ea2SRob Clark 		 * prior to drm_prime_gem_destroy
11026c0e3ea2SRob Clark 		 */
11036c0e3ea2SRob Clark 		msm_gem_unlock(obj);
11046c0e3ea2SRob Clark 
1105f28730c8Sjilai wang 		drm_prime_gem_destroy(obj, msm_obj->sgt);
110605b84911SRob Clark 	} else {
1107599089c6SRob Clark 		msm_gem_vunmap(obj);
1108c8afe684SRob Clark 		put_pages(obj);
110957f04815SRob Clark 		put_iova_vmas(obj);
11106c0e3ea2SRob Clark 		msm_gem_unlock(obj);
111105b84911SRob Clark 	}
1112c8afe684SRob Clark 
1113c8afe684SRob Clark 	drm_gem_object_release(obj);
1114c8afe684SRob Clark 
1115c8afe684SRob Clark 	kfree(msm_obj);
1116c8afe684SRob Clark }
1117c8afe684SRob Clark 
1118c8afe684SRob Clark /* convenience method to construct a GEM buffer object, and userspace handle */
1119c8afe684SRob Clark int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
11200815d774SJordan Crouse 		uint32_t size, uint32_t flags, uint32_t *handle,
11210815d774SJordan Crouse 		char *name)
1122c8afe684SRob Clark {
1123c8afe684SRob Clark 	struct drm_gem_object *obj;
1124c8afe684SRob Clark 	int ret;
1125c8afe684SRob Clark 
1126c8afe684SRob Clark 	obj = msm_gem_new(dev, size, flags);
1127c8afe684SRob Clark 
1128c8afe684SRob Clark 	if (IS_ERR(obj))
1129c8afe684SRob Clark 		return PTR_ERR(obj);
1130c8afe684SRob Clark 
11310815d774SJordan Crouse 	if (name)
11320815d774SJordan Crouse 		msm_gem_object_set_name(obj, "%s", name);
11330815d774SJordan Crouse 
1134c8afe684SRob Clark 	ret = drm_gem_handle_create(file, obj, handle);
1135c8afe684SRob Clark 
1136c8afe684SRob Clark 	/* drop reference from allocate - handle holds it now */
1137f7d33950SEmil Velikov 	drm_gem_object_put(obj);
1138c8afe684SRob Clark 
1139c8afe684SRob Clark 	return ret;
1140c8afe684SRob Clark }
1141c8afe684SRob Clark 
11423c9edd9cSThomas Zimmermann static const struct vm_operations_struct vm_ops = {
11433c9edd9cSThomas Zimmermann 	.fault = msm_gem_fault,
11443c9edd9cSThomas Zimmermann 	.open = drm_gem_vm_open,
11453c9edd9cSThomas Zimmermann 	.close = drm_gem_vm_close,
11463c9edd9cSThomas Zimmermann };
11473c9edd9cSThomas Zimmermann 
11483c9edd9cSThomas Zimmermann static const struct drm_gem_object_funcs msm_gem_object_funcs = {
11493c9edd9cSThomas Zimmermann 	.free = msm_gem_free_object,
11503c9edd9cSThomas Zimmermann 	.pin = msm_gem_prime_pin,
11513c9edd9cSThomas Zimmermann 	.unpin = msm_gem_prime_unpin,
11523c9edd9cSThomas Zimmermann 	.get_sg_table = msm_gem_prime_get_sg_table,
11533c9edd9cSThomas Zimmermann 	.vmap = msm_gem_prime_vmap,
11543c9edd9cSThomas Zimmermann 	.vunmap = msm_gem_prime_vunmap,
11553c9edd9cSThomas Zimmermann 	.vm_ops = &vm_ops,
11563c9edd9cSThomas Zimmermann };
11573c9edd9cSThomas Zimmermann 
115805b84911SRob Clark static int msm_gem_new_impl(struct drm_device *dev,
115905b84911SRob Clark 		uint32_t size, uint32_t flags,
11603cbdc8d8SAkhil P Oommen 		struct drm_gem_object **obj)
1161c8afe684SRob Clark {
1162d12e3390SJonathan Marek 	struct msm_drm_private *priv = dev->dev_private;
1163c8afe684SRob Clark 	struct msm_gem_object *msm_obj;
1164c8afe684SRob Clark 
1165c8afe684SRob Clark 	switch (flags & MSM_BO_CACHE_MASK) {
1166c8afe684SRob Clark 	case MSM_BO_UNCACHED:
1167c8afe684SRob Clark 	case MSM_BO_CACHED:
1168c8afe684SRob Clark 	case MSM_BO_WC:
1169c8afe684SRob Clark 		break;
1170d12e3390SJonathan Marek 	case MSM_BO_CACHED_COHERENT:
1171d12e3390SJonathan Marek 		if (priv->has_cached_coherent)
1172d12e3390SJonathan Marek 			break;
1173d12e3390SJonathan Marek 		/* fallthrough */
1174c8afe684SRob Clark 	default:
11756a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
1176c8afe684SRob Clark 				(flags & MSM_BO_CACHE_MASK));
117705b84911SRob Clark 		return -EINVAL;
1178c8afe684SRob Clark 	}
1179c8afe684SRob Clark 
1180667ce33eSRob Clark 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
118105b84911SRob Clark 	if (!msm_obj)
118205b84911SRob Clark 		return -ENOMEM;
1183c8afe684SRob Clark 
1184c8afe684SRob Clark 	msm_obj->flags = flags;
11854cd33c48SRob Clark 	msm_obj->madv = MSM_MADV_WILLNEED;
1186c8afe684SRob Clark 
11877198e6b0SRob Clark 	INIT_LIST_HEAD(&msm_obj->submit_entry);
11884b85f7f5SRob Clark 	INIT_LIST_HEAD(&msm_obj->vmas);
11894b85f7f5SRob Clark 
119005b84911SRob Clark 	*obj = &msm_obj->base;
11913c9edd9cSThomas Zimmermann 	(*obj)->funcs = &msm_gem_object_funcs;
119205b84911SRob Clark 
119305b84911SRob Clark 	return 0;
119405b84911SRob Clark }
119505b84911SRob Clark 
11960e08270aSSushmita Susheelendra static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
11970e08270aSSushmita Susheelendra 		uint32_t size, uint32_t flags, bool struct_mutex_locked)
119805b84911SRob Clark {
1199f4839bd5SRob Clark 	struct msm_drm_private *priv = dev->dev_private;
12003cbdc8d8SAkhil P Oommen 	struct msm_gem_object *msm_obj;
1201871d812aSRob Clark 	struct drm_gem_object *obj = NULL;
1202f4839bd5SRob Clark 	bool use_vram = false;
120305b84911SRob Clark 	int ret;
120405b84911SRob Clark 
120505b84911SRob Clark 	size = PAGE_ALIGN(size);
120605b84911SRob Clark 
1207c2052a4eSJonathan Marek 	if (!msm_use_mmu(dev))
1208f4839bd5SRob Clark 		use_vram = true;
120986f46f25SJonathan Marek 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1210f4839bd5SRob Clark 		use_vram = true;
1211f4839bd5SRob Clark 
121290643a24SRob Clark 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1213f4839bd5SRob Clark 		return ERR_PTR(-EINVAL);
1214f4839bd5SRob Clark 
12151a5dff5dSJordan Crouse 	/* Disallow zero-sized objects as they make the underlying
12161a5dff5dSJordan Crouse 	 * infrastructure grumpy
12171a5dff5dSJordan Crouse 	 */
12181a5dff5dSJordan Crouse 	if (size == 0)
12191a5dff5dSJordan Crouse 		return ERR_PTR(-EINVAL);
12201a5dff5dSJordan Crouse 
12213cbdc8d8SAkhil P Oommen 	ret = msm_gem_new_impl(dev, size, flags, &obj);
	/* Note: on failure obj is still NULL here, so jumping to fail: (which
	 * calls drm_gem_object_put()) would dereference a NULL pointer.
	 */
122205b84911SRob Clark 	if (ret)
122305b84911SRob Clark 		return ERR_PTR(ret);
122405b84911SRob Clark 
12253cbdc8d8SAkhil P Oommen 	msm_obj = to_msm_bo(obj);
12263cbdc8d8SAkhil P Oommen 
1227f4839bd5SRob Clark 	if (use_vram) {
12284b85f7f5SRob Clark 		struct msm_gem_vma *vma;
1229f4839bd5SRob Clark 		struct page **pages;
1230b3949a9aSHans Verkuil 
1231a694ffedSIskren Chernev 		drm_gem_private_object_init(dev, obj, size);
1232a694ffedSIskren Chernev 
1233a6ae74c9SRob Clark 		msm_gem_lock(obj);
1234f4839bd5SRob Clark 
12354b85f7f5SRob Clark 		vma = add_vma(obj, NULL);
1236a6ae74c9SRob Clark 		msm_gem_unlock(obj);
12374b85f7f5SRob Clark 		if (IS_ERR(vma)) {
12384b85f7f5SRob Clark 			ret = PTR_ERR(vma);
12394b85f7f5SRob Clark 			goto fail;
12404b85f7f5SRob Clark 		}
12414b85f7f5SRob Clark 
12424b85f7f5SRob Clark 		to_msm_bo(obj)->vram_node = &vma->node;
12434b85f7f5SRob Clark 
124445f56690SAlexey Minnekhanov 	/* The call chain get_pages() -> update_inactive() tries to
124545f56690SAlexey Minnekhanov 	 * access msm_obj->mm_list, which has not been initialized
124645f56690SAlexey Minnekhanov 	 * yet.  Initialize mm_list as empty to avoid a NULL pointer
124745f56690SAlexey Minnekhanov 	 * dereference.
124845f56690SAlexey Minnekhanov 	 */
124945f56690SAlexey Minnekhanov 		INIT_LIST_HEAD(&msm_obj->mm_list);
125045f56690SAlexey Minnekhanov 
125107fcad0dSIskren Chernev 		msm_gem_lock(obj);
1252f4839bd5SRob Clark 		pages = get_pages(obj);
125307fcad0dSIskren Chernev 		msm_gem_unlock(obj);
1254f4839bd5SRob Clark 		if (IS_ERR(pages)) {
1255f4839bd5SRob Clark 			ret = PTR_ERR(pages);
1256f4839bd5SRob Clark 			goto fail;
1257f4839bd5SRob Clark 		}
12584b85f7f5SRob Clark 
12594b85f7f5SRob Clark 		vma->iova = physaddr(obj);
1260f4839bd5SRob Clark 	} else {
126105b84911SRob Clark 		ret = drm_gem_object_init(dev, obj, size);
126205b84911SRob Clark 		if (ret)
126305b84911SRob Clark 			goto fail;
12640abdba47SLucas Stach 		/*
12650abdba47SLucas Stach 		 * Our buffers are kept pinned, so allocating them from the
12660abdba47SLucas Stach 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
12670abdba47SLucas Stach 		 * See the comments above new_inode() for why this is required _and_
12680abdba47SLucas Stach 		 * expected if you're going to pin these pages.
12690abdba47SLucas Stach 		 */
12700abdba47SLucas Stach 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1271871d812aSRob Clark 	}
127205b84911SRob Clark 
1273d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
127464fcbde7SRob Clark 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1275d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
12763cbdc8d8SAkhil P Oommen 
12776ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
12786ed0897cSRob Clark 	list_add_tail(&msm_obj->node, &priv->objects);
12796ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
12806ed0897cSRob Clark 
128105b84911SRob Clark 	return obj;
128205b84911SRob Clark 
128305b84911SRob Clark fail:
1284ce0a9dc0SRob Clark 	if (struct_mutex_locked) {
1285ce0a9dc0SRob Clark 		drm_gem_object_put_locked(obj);
1286ce0a9dc0SRob Clark 	} else {
1287f7d33950SEmil Velikov 		drm_gem_object_put(obj);
1288ce0a9dc0SRob Clark 	}
128905b84911SRob Clark 	return ERR_PTR(ret);
129005b84911SRob Clark }
129105b84911SRob Clark 
12920e08270aSSushmita Susheelendra struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
12930e08270aSSushmita Susheelendra 		uint32_t size, uint32_t flags)
12940e08270aSSushmita Susheelendra {
12950e08270aSSushmita Susheelendra 	return _msm_gem_new(dev, size, flags, true);
12960e08270aSSushmita Susheelendra }
12970e08270aSSushmita Susheelendra 
12980e08270aSSushmita Susheelendra struct drm_gem_object *msm_gem_new(struct drm_device *dev,
12990e08270aSSushmita Susheelendra 		uint32_t size, uint32_t flags)
13000e08270aSSushmita Susheelendra {
13010e08270aSSushmita Susheelendra 	return _msm_gem_new(dev, size, flags, false);
13020e08270aSSushmita Susheelendra }
13030e08270aSSushmita Susheelendra 
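/* Import a dma-buf: wrap the caller-provided sg_table in a writecombine GEM
 * object.  Without an IOMMU we cannot map arbitrary imported pages, so this
 * is only supported when one is present.
 */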
130405b84911SRob Clark struct drm_gem_object *msm_gem_import(struct drm_device *dev,
130579f0e202SRob Clark 		struct dma_buf *dmabuf, struct sg_table *sgt)
130605b84911SRob Clark {
13073cbdc8d8SAkhil P Oommen 	struct msm_drm_private *priv = dev->dev_private;
130805b84911SRob Clark 	struct msm_gem_object *msm_obj;
130905b84911SRob Clark 	struct drm_gem_object *obj;
131079f0e202SRob Clark 	uint32_t size;
131105b84911SRob Clark 	int ret, npages;
131205b84911SRob Clark 
1313871d812aSRob Clark 	/* if we don't have IOMMU, don't bother pretending we can import: */
1314c2052a4eSJonathan Marek 	if (!msm_use_mmu(dev)) {
13156a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1316871d812aSRob Clark 		return ERR_PTR(-EINVAL);
1317871d812aSRob Clark 	}
1318871d812aSRob Clark 
131979f0e202SRob Clark 	size = PAGE_ALIGN(dmabuf->size);
132005b84911SRob Clark 
13213cbdc8d8SAkhil P Oommen 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	/* Note: on failure obj is still NULL here, so jumping to fail: (which
	 * calls drm_gem_object_put()) would dereference a NULL pointer.
	 */
132205b84911SRob Clark 	if (ret)
132305b84911SRob Clark 		return ERR_PTR(ret);
132405b84911SRob Clark 
132505b84911SRob Clark 	drm_gem_private_object_init(dev, obj, size);
132605b84911SRob Clark 
132705b84911SRob Clark 	npages = size / PAGE_SIZE;
132805b84911SRob Clark 
132905b84911SRob Clark 	msm_obj = to_msm_bo(obj);
1330a6ae74c9SRob Clark 	msm_gem_lock(obj);
133105b84911SRob Clark 	msm_obj->sgt = sgt;
13322098105eSMichal Hocko 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
133305b84911SRob Clark 	if (!msm_obj->pages) {
1334a6ae74c9SRob Clark 		msm_gem_unlock(obj);
133505b84911SRob Clark 		ret = -ENOMEM;
133605b84911SRob Clark 		goto fail;
133705b84911SRob Clark 	}
133805b84911SRob Clark 
1339c67e6279SChristian König 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
13400e08270aSSushmita Susheelendra 	if (ret) {
1341a6ae74c9SRob Clark 		msm_gem_unlock(obj);
134205b84911SRob Clark 		goto fail;
13430e08270aSSushmita Susheelendra 	}
134405b84911SRob Clark 
1345a6ae74c9SRob Clark 	msm_gem_unlock(obj);
13463cbdc8d8SAkhil P Oommen 
1347d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
134864fcbde7SRob Clark 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1349d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
13503cbdc8d8SAkhil P Oommen 
13516ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
13526ed0897cSRob Clark 	list_add_tail(&msm_obj->node, &priv->objects);
13536ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
13546ed0897cSRob Clark 
1355c8afe684SRob Clark 	return obj;
1356c8afe684SRob Clark 
1357c8afe684SRob Clark fail:
1358f7d33950SEmil Velikov 	drm_gem_object_put(obj);
1359c8afe684SRob Clark 	return ERR_PTR(ret);
1360c8afe684SRob Clark }
13618223286dSJordan Crouse 
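/* Convenience allocator for kernel-internal buffers: create the BO, pin it
 * into @aspace (when @iova is non-NULL) and map it into the kernel.  Returns
 * the kernel vaddr, with the object and GPU address handed back via @bo and
 * @iova.
 */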
13628223286dSJordan Crouse static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
13638223286dSJordan Crouse 		uint32_t flags, struct msm_gem_address_space *aspace,
13648223286dSJordan Crouse 		struct drm_gem_object **bo, uint64_t *iova, bool locked)
13658223286dSJordan Crouse {
13668223286dSJordan Crouse 	void *vaddr;
13678223286dSJordan Crouse 	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
13688223286dSJordan Crouse 	int ret;
13698223286dSJordan Crouse 
13708223286dSJordan Crouse 	if (IS_ERR(obj))
13718223286dSJordan Crouse 		return ERR_CAST(obj);
13728223286dSJordan Crouse 
13738223286dSJordan Crouse 	if (iova) {
13749fe041f6SJordan Crouse 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
137593f7abf1SJordan Crouse 		if (ret)
137693f7abf1SJordan Crouse 			goto err;
13778223286dSJordan Crouse 	}
13788223286dSJordan Crouse 
13798223286dSJordan Crouse 	vaddr = msm_gem_get_vaddr(obj);
1380c9811d0fSWei Yongjun 	if (IS_ERR(vaddr)) {
13817ad0e8cfSJordan Crouse 		msm_gem_unpin_iova(obj, aspace);
138293f7abf1SJordan Crouse 		ret = PTR_ERR(vaddr);
138393f7abf1SJordan Crouse 		goto err;
13848223286dSJordan Crouse 	}
13858223286dSJordan Crouse 
13868223286dSJordan Crouse 	if (bo)
13878223286dSJordan Crouse 		*bo = obj;
13888223286dSJordan Crouse 
13898223286dSJordan Crouse 	return vaddr;
139093f7abf1SJordan Crouse err:
139193f7abf1SJordan Crouse 	if (locked)
1392eecd7fd8SEmil Velikov 		drm_gem_object_put_locked(obj);
139393f7abf1SJordan Crouse 	else
1394f7d33950SEmil Velikov 		drm_gem_object_put(obj);
139593f7abf1SJordan Crouse 
139693f7abf1SJordan Crouse 	return ERR_PTR(ret);
13988223286dSJordan Crouse }
13998223286dSJordan Crouse 
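/* Below is a minimal usage sketch for msm_gem_kernel_new(), assuming a
 * hypothetical caller that already has a drm_device and an address space
 * ("aspace"); the size and flags are illustrative only:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... write into ptr, hand iova to the GPU ...
 *	msm_gem_kernel_put(bo, aspace, false);
 */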
14008223286dSJordan Crouse void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
14018223286dSJordan Crouse 		uint32_t flags, struct msm_gem_address_space *aspace,
14028223286dSJordan Crouse 		struct drm_gem_object **bo, uint64_t *iova)
14038223286dSJordan Crouse {
14048223286dSJordan Crouse 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
14058223286dSJordan Crouse }
14068223286dSJordan Crouse 
14078223286dSJordan Crouse void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
14088223286dSJordan Crouse 		uint32_t flags, struct msm_gem_address_space *aspace,
14098223286dSJordan Crouse 		struct drm_gem_object **bo, uint64_t *iova)
14108223286dSJordan Crouse {
14118223286dSJordan Crouse 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
14128223286dSJordan Crouse }
14131e29dff0SJordan Crouse 
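/* Tear down a buffer created with msm_gem_kernel_new(): drop the kernel
 * mapping and the iova pin, then drop the caller's reference.  A NULL or
 * ERR_PTR bo is tolerated.
 */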
14141e29dff0SJordan Crouse void msm_gem_kernel_put(struct drm_gem_object *bo,
14151e29dff0SJordan Crouse 		struct msm_gem_address_space *aspace, bool locked)
14161e29dff0SJordan Crouse {
14171e29dff0SJordan Crouse 	if (IS_ERR_OR_NULL(bo))
14181e29dff0SJordan Crouse 		return;
14191e29dff0SJordan Crouse 
14201e29dff0SJordan Crouse 	msm_gem_put_vaddr(bo);
14217ad0e8cfSJordan Crouse 	msm_gem_unpin_iova(bo, aspace);
14221e29dff0SJordan Crouse 
14231e29dff0SJordan Crouse 	if (locked)
1424eecd7fd8SEmil Velikov 		drm_gem_object_put_locked(bo);
14251e29dff0SJordan Crouse 	else
1426f7d33950SEmil Velikov 		drm_gem_object_put(bo);
14271e29dff0SJordan Crouse }
14280815d774SJordan Crouse 
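/* Attach a human-readable debug name to the BO (printf-style); the result
 * is truncated to fit msm_obj->name.
 */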
14290815d774SJordan Crouse void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
14300815d774SJordan Crouse {
14310815d774SJordan Crouse 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
14320815d774SJordan Crouse 	va_list ap;
14330815d774SJordan Crouse 
14340815d774SJordan Crouse 	if (!fmt)
14350815d774SJordan Crouse 		return;
14360815d774SJordan Crouse 
14370815d774SJordan Crouse 	va_start(ap, fmt);
14380815d774SJordan Crouse 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
14390815d774SJordan Crouse 	va_end(ap);
14400815d774SJordan Crouse }
1441