xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision af9b3547)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2c8afe684SRob Clark /*
3c8afe684SRob Clark  * Copyright (C) 2013 Red Hat
4c8afe684SRob Clark  * Author: Rob Clark <robdclark@gmail.com>
5c8afe684SRob Clark  */
6c8afe684SRob Clark 
70a0f0d8bSChristoph Hellwig #include <linux/dma-map-ops.h>
8c8afe684SRob Clark #include <linux/spinlock.h>
9c8afe684SRob Clark #include <linux/shmem_fs.h>
1005b84911SRob Clark #include <linux/dma-buf.h>
1101c8f1c4SDan Williams #include <linux/pfn_t.h>
12c8afe684SRob Clark 
13feea39a8SSam Ravnborg #include <drm/drm_prime.h>
14feea39a8SSam Ravnborg 
15c8afe684SRob Clark #include "msm_drv.h"
16fde5de6cSRob Clark #include "msm_fence.h"
17c8afe684SRob Clark #include "msm_gem.h"
187198e6b0SRob Clark #include "msm_gpu.h"
19871d812aSRob Clark #include "msm_mmu.h"
20c8afe684SRob Clark 
213edfa30fSRob Clark static void update_inactive(struct msm_gem_object *msm_obj);
220e08270aSSushmita Susheelendra 
23871d812aSRob Clark static dma_addr_t physaddr(struct drm_gem_object *obj)
24871d812aSRob Clark {
25871d812aSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
26871d812aSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
27871d812aSRob Clark 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
28871d812aSRob Clark 			priv->vram.paddr;
29871d812aSRob Clark }
30871d812aSRob Clark 
31072f1f91SRob Clark static bool use_pages(struct drm_gem_object *obj)
32072f1f91SRob Clark {
33072f1f91SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
34072f1f91SRob Clark 	return !msm_obj->vram_node;
35072f1f91SRob Clark }
36072f1f91SRob Clark 
373de433c5SRob Clark /*
383de433c5SRob Clark  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
393de433c5SRob Clark  * API.  Really GPU cache is out of scope here (handled on cmdstream)
403de433c5SRob Clark  * and all we need to do is invalidate newly allocated pages before
413de433c5SRob Clark  * mapping to CPU as uncached/writecombine.
423de433c5SRob Clark  *
433de433c5SRob Clark  * On top of this, we have the added headache, that depending on
443de433c5SRob Clark  * display generation, the display's iommu may be wired up to either
453de433c5SRob Clark  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
463de433c5SRob Clark  * that here we either have dma-direct or iommu ops.
473de433c5SRob Clark  *
483de433c5SRob Clark  * Let this be a cautionary tale of abstraction gone wrong.
493de433c5SRob Clark  */
503de433c5SRob Clark 
513de433c5SRob Clark static void sync_for_device(struct msm_gem_object *msm_obj)
523de433c5SRob Clark {
533de433c5SRob Clark 	struct device *dev = msm_obj->base.dev->dev;
543de433c5SRob Clark 
557690a33fSMarek Szyprowski 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
563de433c5SRob Clark }
573de433c5SRob Clark 
583de433c5SRob Clark static void sync_for_cpu(struct msm_gem_object *msm_obj)
593de433c5SRob Clark {
603de433c5SRob Clark 	struct device *dev = msm_obj->base.dev->dev;
613de433c5SRob Clark 
627690a33fSMarek Szyprowski 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
633de433c5SRob Clark }
643de433c5SRob Clark 
65871d812aSRob Clark /* allocate pages from VRAM carveout, used when no IOMMU: */
660e08270aSSushmita Susheelendra static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
67871d812aSRob Clark {
68871d812aSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
69871d812aSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
70871d812aSRob Clark 	dma_addr_t paddr;
71871d812aSRob Clark 	struct page **p;
72871d812aSRob Clark 	int ret, i;
73871d812aSRob Clark 
742098105eSMichal Hocko 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
75871d812aSRob Clark 	if (!p)
76871d812aSRob Clark 		return ERR_PTR(-ENOMEM);
77871d812aSRob Clark 
780e08270aSSushmita Susheelendra 	spin_lock(&priv->vram.lock);
794e64e553SChris Wilson 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
800e08270aSSushmita Susheelendra 	spin_unlock(&priv->vram.lock);
81871d812aSRob Clark 	if (ret) {
822098105eSMichal Hocko 		kvfree(p);
83871d812aSRob Clark 		return ERR_PTR(ret);
84871d812aSRob Clark 	}
85871d812aSRob Clark 
86871d812aSRob Clark 	paddr = physaddr(obj);
87871d812aSRob Clark 	for (i = 0; i < npages; i++) {
88871d812aSRob Clark 		p[i] = phys_to_page(paddr);
89871d812aSRob Clark 		paddr += PAGE_SIZE;
90871d812aSRob Clark 	}
91871d812aSRob Clark 
92871d812aSRob Clark 	return p;
93871d812aSRob Clark }
94c8afe684SRob Clark 
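/* Get (allocating on first use) the object's backing pages and sg table;
 * the object lock must be held.
 */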
95c8afe684SRob Clark static struct page **get_pages(struct drm_gem_object *obj)
96c8afe684SRob Clark {
97c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
98c8afe684SRob Clark 
9990643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
10007fcad0dSIskren Chernev 
101c8afe684SRob Clark 	if (!msm_obj->pages) {
102c8afe684SRob Clark 		struct drm_device *dev = obj->dev;
103871d812aSRob Clark 		struct page **p;
104c8afe684SRob Clark 		int npages = obj->size >> PAGE_SHIFT;
105c8afe684SRob Clark 
106072f1f91SRob Clark 		if (use_pages(obj))
1070cdbe8acSDavid Herrmann 			p = drm_gem_get_pages(obj);
108871d812aSRob Clark 		else
109871d812aSRob Clark 			p = get_pages_vram(obj, npages);
110871d812aSRob Clark 
111c8afe684SRob Clark 		if (IS_ERR(p)) {
1126a41da17SMamta Shukla 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
113c8afe684SRob Clark 					PTR_ERR(p));
114c8afe684SRob Clark 			return p;
115c8afe684SRob Clark 		}
116c8afe684SRob Clark 
11762e3a3e3SPrakash Kamliya 		msm_obj->pages = p;
11862e3a3e3SPrakash Kamliya 
119707d561fSGerd Hoffmann 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
1201f70e079SWei Yongjun 		if (IS_ERR(msm_obj->sgt)) {
12162e3a3e3SPrakash Kamliya 			void *ptr = ERR_CAST(msm_obj->sgt);
122c8afe684SRob Clark 
1236a41da17SMamta Shukla 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
12462e3a3e3SPrakash Kamliya 			msm_obj->sgt = NULL;
12562e3a3e3SPrakash Kamliya 			return ptr;
12662e3a3e3SPrakash Kamliya 		}
127c8afe684SRob Clark 
128c8afe684SRob Clark 		/* For non-cached buffers, ensure the new pages are clean
129c8afe684SRob Clark 		 * because display controller, GPU, etc. are not coherent:
130c8afe684SRob Clark 		 */
131c8afe684SRob Clark 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
1323de433c5SRob Clark 			sync_for_device(msm_obj);
13364fcbde7SRob Clark 
13464fcbde7SRob Clark 		GEM_WARN_ON(msm_obj->active_count);
13564fcbde7SRob Clark 		update_inactive(msm_obj);
136c8afe684SRob Clark 	}
137c8afe684SRob Clark 
138c8afe684SRob Clark 	return msm_obj->pages;
139c8afe684SRob Clark }
140c8afe684SRob Clark 
1410e08270aSSushmita Susheelendra static void put_pages_vram(struct drm_gem_object *obj)
1420e08270aSSushmita Susheelendra {
1430e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1440e08270aSSushmita Susheelendra 	struct msm_drm_private *priv = obj->dev->dev_private;
1450e08270aSSushmita Susheelendra 
1460e08270aSSushmita Susheelendra 	spin_lock(&priv->vram.lock);
1470e08270aSSushmita Susheelendra 	drm_mm_remove_node(msm_obj->vram_node);
1480e08270aSSushmita Susheelendra 	spin_unlock(&priv->vram.lock);
1490e08270aSSushmita Susheelendra 
1500e08270aSSushmita Susheelendra 	kvfree(msm_obj->pages);
1510e08270aSSushmita Susheelendra }
1520e08270aSSushmita Susheelendra 
153c8afe684SRob Clark static void put_pages(struct drm_gem_object *obj)
154c8afe684SRob Clark {
155c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
156c8afe684SRob Clark 
157c8afe684SRob Clark 	if (msm_obj->pages) {
1583976626eSBen Hutchings 		if (msm_obj->sgt) {
1593976626eSBen Hutchings 			/* For non-cached buffers, ensure the new
1603976626eSBen Hutchings 			 * pages are clean because display controller,
1613976626eSBen Hutchings 			 * GPU, etc. are not coherent:
162c8afe684SRob Clark 			 */
163c8afe684SRob Clark 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
1643de433c5SRob Clark 				sync_for_cpu(msm_obj);
16562e3a3e3SPrakash Kamliya 
166c8afe684SRob Clark 			sg_free_table(msm_obj->sgt);
167c8afe684SRob Clark 			kfree(msm_obj->sgt);
168b9a31d0dSRob Clark 			msm_obj->sgt = NULL;
1693976626eSBen Hutchings 		}
170c8afe684SRob Clark 
171072f1f91SRob Clark 		if (use_pages(obj))
172c8afe684SRob Clark 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
1730e08270aSSushmita Susheelendra 		else
1740e08270aSSushmita Susheelendra 			put_pages_vram(obj);
175871d812aSRob Clark 
176c8afe684SRob Clark 		msm_obj->pages = NULL;
177c8afe684SRob Clark 	}
178c8afe684SRob Clark }
179c8afe684SRob Clark 
18005b84911SRob Clark struct page **msm_gem_get_pages(struct drm_gem_object *obj)
18105b84911SRob Clark {
1820e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
18305b84911SRob Clark 	struct page **p;
1840e08270aSSushmita Susheelendra 
185a6ae74c9SRob Clark 	msm_gem_lock(obj);
1860e08270aSSushmita Susheelendra 
18790643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
188a6ae74c9SRob Clark 		msm_gem_unlock(obj);
1890e08270aSSushmita Susheelendra 		return ERR_PTR(-EBUSY);
1900e08270aSSushmita Susheelendra 	}
1910e08270aSSushmita Susheelendra 
19205b84911SRob Clark 	p = get_pages(obj);
19310f76165SRob Clark 
19410f76165SRob Clark 	if (!IS_ERR(p)) {
19510f76165SRob Clark 		msm_obj->pin_count++;
19610f76165SRob Clark 		update_inactive(msm_obj);
19710f76165SRob Clark 	}
19810f76165SRob Clark 
199a6ae74c9SRob Clark 	msm_gem_unlock(obj);
20005b84911SRob Clark 	return p;
20105b84911SRob Clark }
20205b84911SRob Clark 
20305b84911SRob Clark void msm_gem_put_pages(struct drm_gem_object *obj)
20405b84911SRob Clark {
20510f76165SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
20610f76165SRob Clark 
20710f76165SRob Clark 	msm_gem_lock(obj);
20810f76165SRob Clark 	msm_obj->pin_count--;
20910f76165SRob Clark 	GEM_WARN_ON(msm_obj->pin_count < 0);
21010f76165SRob Clark 	update_inactive(msm_obj);
21110f76165SRob Clark 	msm_gem_unlock(obj);
21205b84911SRob Clark }
21305b84911SRob Clark 
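/* Pick the CPU page protection matching the buffer's caching flags
 * (write-combine, uncached, or default cached):
 */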
214*af9b3547SJonathan Marek static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
215*af9b3547SJonathan Marek {
216*af9b3547SJonathan Marek 	if (msm_obj->flags & MSM_BO_WC)
217*af9b3547SJonathan Marek 		return pgprot_writecombine(prot);
218*af9b3547SJonathan Marek 	if (msm_obj->flags & MSM_BO_UNCACHED)
219*af9b3547SJonathan Marek 		return pgprot_noncached(prot);
220*af9b3547SJonathan Marek 	return prot;
221*af9b3547SJonathan Marek }
222*af9b3547SJonathan Marek 
223c8afe684SRob Clark int msm_gem_mmap_obj(struct drm_gem_object *obj,
224c8afe684SRob Clark 		struct vm_area_struct *vma)
225c8afe684SRob Clark {
226c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
227c8afe684SRob Clark 
228c8afe684SRob Clark 	vma->vm_flags &= ~VM_PFNMAP;
229c8afe684SRob Clark 	vma->vm_flags |= VM_MIXEDMAP;
230*af9b3547SJonathan Marek 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
231c8afe684SRob Clark 
232c8afe684SRob Clark 	return 0;
233c8afe684SRob Clark }
234c8afe684SRob Clark 
235c8afe684SRob Clark int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
236c8afe684SRob Clark {
237c8afe684SRob Clark 	int ret;
238c8afe684SRob Clark 
239c8afe684SRob Clark 	ret = drm_gem_mmap(filp, vma);
240c8afe684SRob Clark 	if (ret) {
241c8afe684SRob Clark 		DBG("mmap failed: %d", ret);
242c8afe684SRob Clark 		return ret;
243c8afe684SRob Clark 	}
244c8afe684SRob Clark 
245c8afe684SRob Clark 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
246c8afe684SRob Clark }
247c8afe684SRob Clark 
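/* CPU page-fault handler for mmap'd objects: ensure the backing pages are
 * allocated and insert the faulting page's pfn into the userspace vma.
 */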
2483c9edd9cSThomas Zimmermann static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
249c8afe684SRob Clark {
25011bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
251c8afe684SRob Clark 	struct drm_gem_object *obj = vma->vm_private_data;
2520e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
253c8afe684SRob Clark 	struct page **pages;
254c8afe684SRob Clark 	unsigned long pfn;
255c8afe684SRob Clark 	pgoff_t pgoff;
256a5f74ec7SSouptick Joarder 	int err;
257a5f74ec7SSouptick Joarder 	vm_fault_t ret;
258c8afe684SRob Clark 
2590e08270aSSushmita Susheelendra 	/*
2600e08270aSSushmita Susheelendra 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
2610e08270aSSushmita Susheelendra 	 * a reference on obj. So, we don't need to hold one here.
262d78d383aSRob Clark 	 */
263a6ae74c9SRob Clark 	err = msm_gem_lock_interruptible(obj);
264a5f74ec7SSouptick Joarder 	if (err) {
265a5f74ec7SSouptick Joarder 		ret = VM_FAULT_NOPAGE;
266c8afe684SRob Clark 		goto out;
267a5f74ec7SSouptick Joarder 	}
268c8afe684SRob Clark 
26990643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
270a6ae74c9SRob Clark 		msm_gem_unlock(obj);
2710e08270aSSushmita Susheelendra 		return VM_FAULT_SIGBUS;
2720e08270aSSushmita Susheelendra 	}
2730e08270aSSushmita Susheelendra 
274c8afe684SRob Clark 	/* make sure we have pages attached now */
275c8afe684SRob Clark 	pages = get_pages(obj);
276c8afe684SRob Clark 	if (IS_ERR(pages)) {
277a5f74ec7SSouptick Joarder 		ret = vmf_error(PTR_ERR(pages));
278c8afe684SRob Clark 		goto out_unlock;
279c8afe684SRob Clark 	}
280c8afe684SRob Clark 
281c8afe684SRob Clark 	/* We don't use vmf->pgoff since that has the fake offset: */
2821a29d85eSJan Kara 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
283c8afe684SRob Clark 
284871d812aSRob Clark 	pfn = page_to_pfn(pages[pgoff]);
285c8afe684SRob Clark 
2861a29d85eSJan Kara 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
287c8afe684SRob Clark 			pfn, pfn << PAGE_SHIFT);
288c8afe684SRob Clark 
289a5f74ec7SSouptick Joarder 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
290c8afe684SRob Clark out_unlock:
291a6ae74c9SRob Clark 	msm_gem_unlock(obj);
292c8afe684SRob Clark out:
293a5f74ec7SSouptick Joarder 	return ret;
294c8afe684SRob Clark }
295c8afe684SRob Clark 
296c8afe684SRob Clark /** get mmap offset */
297c8afe684SRob Clark static uint64_t mmap_offset(struct drm_gem_object *obj)
298c8afe684SRob Clark {
299c8afe684SRob Clark 	struct drm_device *dev = obj->dev;
300c8afe684SRob Clark 	int ret;
301c8afe684SRob Clark 
30290643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
303c8afe684SRob Clark 
304c8afe684SRob Clark 	/* Make it mmapable */
305c8afe684SRob Clark 	ret = drm_gem_create_mmap_offset(obj);
306c8afe684SRob Clark 
307c8afe684SRob Clark 	if (ret) {
3086a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
309c8afe684SRob Clark 		return 0;
310c8afe684SRob Clark 	}
311c8afe684SRob Clark 
312c8afe684SRob Clark 	return drm_vma_node_offset_addr(&obj->vma_node);
313c8afe684SRob Clark }
314c8afe684SRob Clark 
315c8afe684SRob Clark uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
316c8afe684SRob Clark {
317c8afe684SRob Clark 	uint64_t offset;
3180e08270aSSushmita Susheelendra 
319a6ae74c9SRob Clark 	msm_gem_lock(obj);
320c8afe684SRob Clark 	offset = mmap_offset(obj);
321a6ae74c9SRob Clark 	msm_gem_unlock(obj);
322c8afe684SRob Clark 	return offset;
323c8afe684SRob Clark }
324c8afe684SRob Clark 
3254b85f7f5SRob Clark static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
3264b85f7f5SRob Clark 		struct msm_gem_address_space *aspace)
3274b85f7f5SRob Clark {
3284b85f7f5SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3294b85f7f5SRob Clark 	struct msm_gem_vma *vma;
3304b85f7f5SRob Clark 
33190643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3320e08270aSSushmita Susheelendra 
3334b85f7f5SRob Clark 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
3344b85f7f5SRob Clark 	if (!vma)
3354b85f7f5SRob Clark 		return ERR_PTR(-ENOMEM);
3364b85f7f5SRob Clark 
3374b85f7f5SRob Clark 	vma->aspace = aspace;
3384b85f7f5SRob Clark 
3394b85f7f5SRob Clark 	list_add_tail(&vma->list, &msm_obj->vmas);
3404b85f7f5SRob Clark 
3414b85f7f5SRob Clark 	return vma;
3424b85f7f5SRob Clark }
3434b85f7f5SRob Clark 
3444b85f7f5SRob Clark static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
3454b85f7f5SRob Clark 		struct msm_gem_address_space *aspace)
3464b85f7f5SRob Clark {
3474b85f7f5SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3484b85f7f5SRob Clark 	struct msm_gem_vma *vma;
3494b85f7f5SRob Clark 
35090643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3514b85f7f5SRob Clark 
3524b85f7f5SRob Clark 	list_for_each_entry(vma, &msm_obj->vmas, list) {
3534b85f7f5SRob Clark 		if (vma->aspace == aspace)
3544b85f7f5SRob Clark 			return vma;
3554b85f7f5SRob Clark 	}
3564b85f7f5SRob Clark 
3574b85f7f5SRob Clark 	return NULL;
3584b85f7f5SRob Clark }
3594b85f7f5SRob Clark 
3604b85f7f5SRob Clark static void del_vma(struct msm_gem_vma *vma)
3614b85f7f5SRob Clark {
3624b85f7f5SRob Clark 	if (!vma)
3634b85f7f5SRob Clark 		return;
3644b85f7f5SRob Clark 
3654b85f7f5SRob Clark 	list_del(&vma->list);
3664b85f7f5SRob Clark 	kfree(vma);
3674b85f7f5SRob Clark }
3684b85f7f5SRob Clark 
36920d0ae2fSRob Clark /**
37020d0ae2fSRob Clark  * If close is true, this also closes the VMA (releasing the allocated
37120d0ae2fSRob Clark  * iova range) in addition to removing the iommu mapping.  In the eviction
37220d0ae2fSRob Clark  * case (!close), we keep the iova allocated, but only remove the iommu
37320d0ae2fSRob Clark  * mapping.
37420d0ae2fSRob Clark  */
3754fe5f65eSRob Clark static void
37620d0ae2fSRob Clark put_iova_spaces(struct drm_gem_object *obj, bool close)
3774fe5f65eSRob Clark {
3784fe5f65eSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3799b73bde3SIskren Chernev 	struct msm_gem_vma *vma;
3804fe5f65eSRob Clark 
38190643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3824fe5f65eSRob Clark 
3839b73bde3SIskren Chernev 	list_for_each_entry(vma, &msm_obj->vmas, list) {
384d67f1b6dSBrian Masney 		if (vma->aspace) {
3857ad0e8cfSJordan Crouse 			msm_gem_purge_vma(vma->aspace, vma);
38620d0ae2fSRob Clark 			if (close)
3877ad0e8cfSJordan Crouse 				msm_gem_close_vma(vma->aspace, vma);
388d67f1b6dSBrian Masney 		}
3899b73bde3SIskren Chernev 	}
3909b73bde3SIskren Chernev }
3919b73bde3SIskren Chernev 
3929b73bde3SIskren Chernev /* Called with msm_obj locked */
3939b73bde3SIskren Chernev static void
3949b73bde3SIskren Chernev put_iova_vmas(struct drm_gem_object *obj)
3954fe5f65eSRob Clark {
3964fe5f65eSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3974fe5f65eSRob Clark 	struct msm_gem_vma *vma, *tmp;
3984fe5f65eSRob Clark 
39990643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
4004fe5f65eSRob Clark 
4014fe5f65eSRob Clark 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
4024b85f7f5SRob Clark 		del_vma(vma);
4034fe5f65eSRob Clark 	}
4044fe5f65eSRob Clark }
4054fe5f65eSRob Clark 
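/* Look up the vma for the given address space, creating it and allocating
 * an iova range on first use.  Returns the iova but does not pin pages.
 */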
4068117e5e5SRob Clark static int get_iova_locked(struct drm_gem_object *obj,
407d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova,
408d3b8877eSJonathan Marek 		u64 range_start, u64 range_end)
409c8afe684SRob Clark {
4104b85f7f5SRob Clark 	struct msm_gem_vma *vma;
411c8afe684SRob Clark 	int ret = 0;
412c8afe684SRob Clark 
41390643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
414cb1e3818SRob Clark 
4154b85f7f5SRob Clark 	vma = lookup_vma(obj, aspace);
416871d812aSRob Clark 
4174b85f7f5SRob Clark 	if (!vma) {
4184b85f7f5SRob Clark 		vma = add_vma(obj, aspace);
419c0ee9794SJordan Crouse 		if (IS_ERR(vma))
420c0ee9794SJordan Crouse 			return PTR_ERR(vma);
4214b85f7f5SRob Clark 
422d3b8877eSJonathan Marek 		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
423d3b8877eSJonathan Marek 			range_start, range_end);
424c0ee9794SJordan Crouse 		if (ret) {
425c0ee9794SJordan Crouse 			del_vma(vma);
426c0ee9794SJordan Crouse 			return ret;
427c8afe684SRob Clark 		}
4284b85f7f5SRob Clark 	}
4294b85f7f5SRob Clark 
4304b85f7f5SRob Clark 	*iova = vma->iova;
4314b85f7f5SRob Clark 	return 0;
432c0ee9794SJordan Crouse }
4334b85f7f5SRob Clark 
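/* Pin the backing pages and map them into the vma for the given address
 * space, with IOMMU protection bits derived from the buffer flags.
 */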
434c0ee9794SJordan Crouse static int msm_gem_pin_iova(struct drm_gem_object *obj,
435c0ee9794SJordan Crouse 		struct msm_gem_address_space *aspace)
436c0ee9794SJordan Crouse {
437c0ee9794SJordan Crouse 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
438c0ee9794SJordan Crouse 	struct msm_gem_vma *vma;
439c0ee9794SJordan Crouse 	struct page **pages;
44064fcbde7SRob Clark 	int ret, prot = IOMMU_READ;
441bbc2cd07SRob Clark 
442bbc2cd07SRob Clark 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
443bbc2cd07SRob Clark 		prot |= IOMMU_WRITE;
444c0ee9794SJordan Crouse 
4450b462d7aSJonathan Marek 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
4460b462d7aSJonathan Marek 		prot |= IOMMU_PRIV;
4470b462d7aSJonathan Marek 
44890643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
449c0ee9794SJordan Crouse 
45090643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
451c0ee9794SJordan Crouse 		return -EBUSY;
452c0ee9794SJordan Crouse 
453c0ee9794SJordan Crouse 	vma = lookup_vma(obj, aspace);
45490643a24SRob Clark 	if (GEM_WARN_ON(!vma))
455c0ee9794SJordan Crouse 		return -EINVAL;
456c0ee9794SJordan Crouse 
457c0ee9794SJordan Crouse 	pages = get_pages(obj);
458c0ee9794SJordan Crouse 	if (IS_ERR(pages))
459c0ee9794SJordan Crouse 		return PTR_ERR(pages);
460c0ee9794SJordan Crouse 
46164fcbde7SRob Clark 	ret = msm_gem_map_vma(aspace, vma, prot,
462bbc2cd07SRob Clark 			msm_obj->sgt, obj->size >> PAGE_SHIFT);
46364fcbde7SRob Clark 
46464fcbde7SRob Clark 	if (!ret)
46564fcbde7SRob Clark 		msm_obj->pin_count++;
46664fcbde7SRob Clark 
46764fcbde7SRob Clark 	return ret;
468c0ee9794SJordan Crouse }
469c0ee9794SJordan Crouse 
470e4b87d22SRob Clark static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
471d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova,
472d3b8877eSJonathan Marek 		u64 range_start, u64 range_end)
473c0ee9794SJordan Crouse {
474c0ee9794SJordan Crouse 	u64 local;
475c0ee9794SJordan Crouse 	int ret;
476c0ee9794SJordan Crouse 
47790643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
478c0ee9794SJordan Crouse 
4798117e5e5SRob Clark 	ret = get_iova_locked(obj, aspace, &local,
480d3b8877eSJonathan Marek 		range_start, range_end);
481c0ee9794SJordan Crouse 
482c0ee9794SJordan Crouse 	if (!ret)
483c0ee9794SJordan Crouse 		ret = msm_gem_pin_iova(obj, aspace);
484c0ee9794SJordan Crouse 
485c0ee9794SJordan Crouse 	if (!ret)
486c0ee9794SJordan Crouse 		*iova = local;
487c0ee9794SJordan Crouse 
488c8afe684SRob Clark 	return ret;
489c8afe684SRob Clark }
490c8afe684SRob Clark 
491e4b87d22SRob Clark /*
492e4b87d22SRob Clark  * get iova and pin it. Should have a matching put
493e4b87d22SRob Clark  * limits iova to specified range (in pages)
494e4b87d22SRob Clark  */
495e4b87d22SRob Clark int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
496e4b87d22SRob Clark 		struct msm_gem_address_space *aspace, uint64_t *iova,
497e4b87d22SRob Clark 		u64 range_start, u64 range_end)
498e4b87d22SRob Clark {
499e4b87d22SRob Clark 	int ret;
500e4b87d22SRob Clark 
501e4b87d22SRob Clark 	msm_gem_lock(obj);
502e4b87d22SRob Clark 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
503e4b87d22SRob Clark 	msm_gem_unlock(obj);
504e4b87d22SRob Clark 
505e4b87d22SRob Clark 	return ret;
506e4b87d22SRob Clark }
507e4b87d22SRob Clark 
508e4b87d22SRob Clark int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
509e4b87d22SRob Clark 		struct msm_gem_address_space *aspace, uint64_t *iova)
510e4b87d22SRob Clark {
511e4b87d22SRob Clark 	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
512e4b87d22SRob Clark }
513e4b87d22SRob Clark 
514d3b8877eSJonathan Marek /* get iova and pin it. Should have a matching put */
515d3b8877eSJonathan Marek int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
516d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova)
517d3b8877eSJonathan Marek {
518d3b8877eSJonathan Marek 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
519d3b8877eSJonathan Marek }
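
/* A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program iova into the hw, then later ...
 *	msm_gem_unpin_iova(obj, aspace);
 */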
520d3b8877eSJonathan Marek 
5217ad0e8cfSJordan Crouse /*
5227ad0e8cfSJordan Crouse  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
5237ad0e8cfSJordan Crouse  * valid for the life of the object
5247ad0e8cfSJordan Crouse  */
5259fe041f6SJordan Crouse int msm_gem_get_iova(struct drm_gem_object *obj,
5269fe041f6SJordan Crouse 		struct msm_gem_address_space *aspace, uint64_t *iova)
5279fe041f6SJordan Crouse {
5289fe041f6SJordan Crouse 	int ret;
5299fe041f6SJordan Crouse 
530a6ae74c9SRob Clark 	msm_gem_lock(obj);
5318117e5e5SRob Clark 	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
532a6ae74c9SRob Clark 	msm_gem_unlock(obj);
5339fe041f6SJordan Crouse 
5349fe041f6SJordan Crouse 	return ret;
5359fe041f6SJordan Crouse }
5369fe041f6SJordan Crouse 
5372638d90aSRob Clark /* get iova without taking a reference, used in places where you have
5389fe041f6SJordan Crouse  * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
5392638d90aSRob Clark  */
5408bdcd949SRob Clark uint64_t msm_gem_iova(struct drm_gem_object *obj,
5418bdcd949SRob Clark 		struct msm_gem_address_space *aspace)
5422638d90aSRob Clark {
5434b85f7f5SRob Clark 	struct msm_gem_vma *vma;
5444b85f7f5SRob Clark 
545a6ae74c9SRob Clark 	msm_gem_lock(obj);
5464b85f7f5SRob Clark 	vma = lookup_vma(obj, aspace);
547a6ae74c9SRob Clark 	msm_gem_unlock(obj);
54890643a24SRob Clark 	GEM_WARN_ON(!vma);
5494b85f7f5SRob Clark 
5504b85f7f5SRob Clark 	return vma ? vma->iova : 0;
5512638d90aSRob Clark }
5522638d90aSRob Clark 
5537ad0e8cfSJordan Crouse /*
554e4b87d22SRob Clark  * Locked variant of msm_gem_unpin_iova()
555e4b87d22SRob Clark  */
556e4b87d22SRob Clark void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
557e4b87d22SRob Clark 		struct msm_gem_address_space *aspace)
558e4b87d22SRob Clark {
55964fcbde7SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
560e4b87d22SRob Clark 	struct msm_gem_vma *vma;
561e4b87d22SRob Clark 
56290643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
563e4b87d22SRob Clark 
564e4b87d22SRob Clark 	vma = lookup_vma(obj, aspace);
565e4b87d22SRob Clark 
56664fcbde7SRob Clark 	if (!GEM_WARN_ON(!vma)) {
567e4b87d22SRob Clark 		msm_gem_unmap_vma(aspace, vma);
56864fcbde7SRob Clark 
56964fcbde7SRob Clark 		msm_obj->pin_count--;
57064fcbde7SRob Clark 		GEM_WARN_ON(msm_obj->pin_count < 0);
57164fcbde7SRob Clark 
57264fcbde7SRob Clark 		update_inactive(msm_obj);
57364fcbde7SRob Clark 	}
574e4b87d22SRob Clark }
575e4b87d22SRob Clark 
576e4b87d22SRob Clark /*
5787ad0e8cfSJordan Crouse  * Unpin an iova by updating the reference counts. The memory isn't actually
5787ad0e8cfSJordan Crouse  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
5797ad0e8cfSJordan Crouse  * to get rid of it
5807ad0e8cfSJordan Crouse  */
5817ad0e8cfSJordan Crouse void msm_gem_unpin_iova(struct drm_gem_object *obj,
5828bdcd949SRob Clark 		struct msm_gem_address_space *aspace)
583c8afe684SRob Clark {
584a6ae74c9SRob Clark 	msm_gem_lock(obj);
585e4b87d22SRob Clark 	msm_gem_unpin_iova_locked(obj, aspace);
586a6ae74c9SRob Clark 	msm_gem_unlock(obj);
587c8afe684SRob Clark }
588c8afe684SRob Clark 
589c8afe684SRob Clark int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
590c8afe684SRob Clark 		struct drm_mode_create_dumb *args)
591c8afe684SRob Clark {
592c8afe684SRob Clark 	args->pitch = align_pitch(args->width, args->bpp);
593c8afe684SRob Clark 	args->size  = PAGE_ALIGN(args->pitch * args->height);
594c8afe684SRob Clark 	return msm_gem_new_handle(dev, file, args->size,
5950815d774SJordan Crouse 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
596c8afe684SRob Clark }
597c8afe684SRob Clark 
598c8afe684SRob Clark int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
599c8afe684SRob Clark 		uint32_t handle, uint64_t *offset)
600c8afe684SRob Clark {
601c8afe684SRob Clark 	struct drm_gem_object *obj;
602c8afe684SRob Clark 	int ret = 0;
603c8afe684SRob Clark 
604c8afe684SRob Clark 	/* GEM does all our handle to object mapping */
605a8ad0bd8SChris Wilson 	obj = drm_gem_object_lookup(file, handle);
606c8afe684SRob Clark 	if (obj == NULL) {
607c8afe684SRob Clark 		ret = -ENOENT;
608c8afe684SRob Clark 		goto fail;
609c8afe684SRob Clark 	}
610c8afe684SRob Clark 
611c8afe684SRob Clark 	*offset = msm_gem_mmap_offset(obj);
612c8afe684SRob Clark 
613f7d33950SEmil Velikov 	drm_gem_object_put(obj);
614c8afe684SRob Clark 
615c8afe684SRob Clark fail:
616c8afe684SRob Clark 	return ret;
617c8afe684SRob Clark }
618c8afe684SRob Clark 
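/* Return a kernel virtual mapping of the object, creating the vmap on
 * first use and taking a vmap_count reference.  Object lock must be held;
 * fails for imported dma-bufs or if the madvise state does not permit it.
 */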
619fad33f4bSRob Clark static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
620c8afe684SRob Clark {
6210e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
6220e08270aSSushmita Susheelendra 	int ret = 0;
6230e08270aSSushmita Susheelendra 
62490643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
625e4b87d22SRob Clark 
6268b6b7d84SDaniel Vetter 	if (obj->import_attach)
6278b6b7d84SDaniel Vetter 		return ERR_PTR(-ENODEV);
6288b6b7d84SDaniel Vetter 
62990643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv > madv)) {
6306a41da17SMamta Shukla 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
631fad33f4bSRob Clark 			msm_obj->madv, madv);
6320e08270aSSushmita Susheelendra 		return ERR_PTR(-EBUSY);
633c8afe684SRob Clark 	}
634c8afe684SRob Clark 
6350e08270aSSushmita Susheelendra 	/* increment vmap_count *before* vmap() call, so shrinker can
636a6ae74c9SRob Clark 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
6370e08270aSSushmita Susheelendra 	 * This guarantees that we won't try to msm_gem_vunmap() this
6380e08270aSSushmita Susheelendra 	 * same object from within the vmap() call (while we already
639a6ae74c9SRob Clark 	 * hold msm_obj lock)
6400e08270aSSushmita Susheelendra 	 */
6410e08270aSSushmita Susheelendra 	msm_obj->vmap_count++;
6420e08270aSSushmita Susheelendra 
6430e08270aSSushmita Susheelendra 	if (!msm_obj->vaddr) {
6440e08270aSSushmita Susheelendra 		struct page **pages = get_pages(obj);
6450e08270aSSushmita Susheelendra 		if (IS_ERR(pages)) {
6460e08270aSSushmita Susheelendra 			ret = PTR_ERR(pages);
6470e08270aSSushmita Susheelendra 			goto fail;
6480e08270aSSushmita Susheelendra 		}
6490e08270aSSushmita Susheelendra 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
650*af9b3547SJonathan Marek 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
6510e08270aSSushmita Susheelendra 		if (msm_obj->vaddr == NULL) {
6520e08270aSSushmita Susheelendra 			ret = -ENOMEM;
6530e08270aSSushmita Susheelendra 			goto fail;
6540e08270aSSushmita Susheelendra 		}
65510f76165SRob Clark 
65610f76165SRob Clark 		update_inactive(msm_obj);
6570e08270aSSushmita Susheelendra 	}
6580e08270aSSushmita Susheelendra 
6590e08270aSSushmita Susheelendra 	return msm_obj->vaddr;
6600e08270aSSushmita Susheelendra 
6610e08270aSSushmita Susheelendra fail:
662e1e9db2cSRob Clark 	msm_obj->vmap_count--;
6630e08270aSSushmita Susheelendra 	return ERR_PTR(ret);
66418f23049SRob Clark }
66518f23049SRob Clark 
666e4b87d22SRob Clark void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
667e4b87d22SRob Clark {
668e4b87d22SRob Clark 	return get_vaddr(obj, MSM_MADV_WILLNEED);
669e4b87d22SRob Clark }
670e4b87d22SRob Clark 
671fad33f4bSRob Clark void *msm_gem_get_vaddr(struct drm_gem_object *obj)
672fad33f4bSRob Clark {
673e4b87d22SRob Clark 	void *ret;
674e4b87d22SRob Clark 
675e4b87d22SRob Clark 	msm_gem_lock(obj);
676e4b87d22SRob Clark 	ret = msm_gem_get_vaddr_locked(obj);
677e4b87d22SRob Clark 	msm_gem_unlock(obj);
678e4b87d22SRob Clark 
679e4b87d22SRob Clark 	return ret;
680fad33f4bSRob Clark }
681fad33f4bSRob Clark 
682fad33f4bSRob Clark /*
683fad33f4bSRob Clark  * Don't use this!  It is for the very special case of dumping
684fad33f4bSRob Clark  * submits from GPU hangs or faults, where the bo may already
685fad33f4bSRob Clark  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
686fad33f4bSRob Clark  * active list.
687fad33f4bSRob Clark  */
688fad33f4bSRob Clark void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
689fad33f4bSRob Clark {
690fad33f4bSRob Clark 	return get_vaddr(obj, __MSM_MADV_PURGED);
691fad33f4bSRob Clark }
692fad33f4bSRob Clark 
693e4b87d22SRob Clark void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
69418f23049SRob Clark {
6950e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
6960e08270aSSushmita Susheelendra 
69790643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
69890643a24SRob Clark 	GEM_WARN_ON(msm_obj->vmap_count < 1);
699e4b87d22SRob Clark 
7000e08270aSSushmita Susheelendra 	msm_obj->vmap_count--;
7014cd33c48SRob Clark }
7020e08270aSSushmita Susheelendra 
7030e08270aSSushmita Susheelendra void msm_gem_put_vaddr(struct drm_gem_object *obj)
7044cd33c48SRob Clark {
705a6ae74c9SRob Clark 	msm_gem_lock(obj);
706e4b87d22SRob Clark 	msm_gem_put_vaddr_locked(obj);
707a6ae74c9SRob Clark 	msm_gem_unlock(obj);
7084cd33c48SRob Clark }
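
/* Typical get/put pairing (illustrative sketch, not a caller in this file;
 * data/size stand in for caller state):
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	msm_gem_put_vaddr(obj);
 */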
7094cd33c48SRob Clark 
7104cd33c48SRob Clark /* Update madvise status, returns true if not purged, else
7114cd33c48SRob Clark  * false or -errno.
7124cd33c48SRob Clark  */
7134cd33c48SRob Clark int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
7144cd33c48SRob Clark {
7154cd33c48SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
71668209390SRob Clark 
717a6ae74c9SRob Clark 	msm_gem_lock(obj);
718c8afe684SRob Clark 
719c8afe684SRob Clark 	if (msm_obj->madv != __MSM_MADV_PURGED)
720c8afe684SRob Clark 		msm_obj->madv = madv;
721c8afe684SRob Clark 
7220e08270aSSushmita Susheelendra 	madv = msm_obj->madv;
7230e08270aSSushmita Susheelendra 
7243edfa30fSRob Clark 	/* If the obj is inactive, we might need to move it
7253edfa30fSRob Clark 	 * between inactive lists
7263edfa30fSRob Clark 	 */
7273edfa30fSRob Clark 	if (msm_obj->active_count == 0)
7283edfa30fSRob Clark 		update_inactive(msm_obj);
7293edfa30fSRob Clark 
730a6ae74c9SRob Clark 	msm_gem_unlock(obj);
7310e08270aSSushmita Susheelendra 
7320e08270aSSushmita Susheelendra 	return (madv != __MSM_MADV_PURGED);
733c8afe684SRob Clark }
734c8afe684SRob Clark 
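/* Release all backing storage of a purgeable object: drop the iommu
 * mappings, kernel vmap, CPU mappings and shmem pages, and mark the
 * object as purged.
 */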
735599089c6SRob Clark void msm_gem_purge(struct drm_gem_object *obj)
73668209390SRob Clark {
73768209390SRob Clark 	struct drm_device *dev = obj->dev;
73868209390SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
73968209390SRob Clark 
74081d4d597SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
74190643a24SRob Clark 	GEM_WARN_ON(!is_purgeable(msm_obj));
74268209390SRob Clark 
74320d0ae2fSRob Clark 	/* Get rid of any iommu mapping(s): */
74420d0ae2fSRob Clark 	put_iova_spaces(obj, true);
7450e08270aSSushmita Susheelendra 
746599089c6SRob Clark 	msm_gem_vunmap(obj);
74768209390SRob Clark 
74881d4d597SRob Clark 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
74981d4d597SRob Clark 
75068209390SRob Clark 	put_pages(obj);
75168209390SRob Clark 
7529b73bde3SIskren Chernev 	put_iova_vmas(obj);
7539b73bde3SIskren Chernev 
75468209390SRob Clark 	msm_obj->madv = __MSM_MADV_PURGED;
75525ed38b3SRob Clark 	update_inactive(msm_obj);
75668209390SRob Clark 
75768209390SRob Clark 	drm_gem_free_mmap_offset(obj);
75868209390SRob Clark 
75968209390SRob Clark 	/* Our goal here is to return as much of the memory as
76068209390SRob Clark 	 * is possible back to the system as we are called from OOM.
76168209390SRob Clark 	 * To do this we must instruct the shmfs to drop all of its
76268209390SRob Clark 	 * backing pages, *now*.
76368209390SRob Clark 	 */
76468209390SRob Clark 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
76568209390SRob Clark 
76668209390SRob Clark 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
76768209390SRob Clark 			0, (loff_t)-1);
76868209390SRob Clark }
76968209390SRob Clark 
77063f17ef8SRob Clark /**
77163f17ef8SRob Clark  * Unpin the backing pages and make them available to be swapped out.
77263f17ef8SRob Clark  */
77363f17ef8SRob Clark void msm_gem_evict(struct drm_gem_object *obj)
77463f17ef8SRob Clark {
77563f17ef8SRob Clark 	struct drm_device *dev = obj->dev;
77663f17ef8SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
77763f17ef8SRob Clark 
77863f17ef8SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
77963f17ef8SRob Clark 	GEM_WARN_ON(is_unevictable(msm_obj));
78063f17ef8SRob Clark 	GEM_WARN_ON(!msm_obj->evictable);
78163f17ef8SRob Clark 	GEM_WARN_ON(msm_obj->active_count);
78263f17ef8SRob Clark 
78363f17ef8SRob Clark 	/* Get rid of any iommu mapping(s): */
78463f17ef8SRob Clark 	put_iova_spaces(obj, false);
78563f17ef8SRob Clark 
78663f17ef8SRob Clark 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
78763f17ef8SRob Clark 
78863f17ef8SRob Clark 	put_pages(obj);
78963f17ef8SRob Clark 
79063f17ef8SRob Clark 	update_inactive(msm_obj);
79163f17ef8SRob Clark }
79263f17ef8SRob Clark 
793599089c6SRob Clark void msm_gem_vunmap(struct drm_gem_object *obj)
794e1e9db2cSRob Clark {
795e1e9db2cSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
796e1e9db2cSRob Clark 
79790643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
7980e08270aSSushmita Susheelendra 
79990643a24SRob Clark 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
800e1e9db2cSRob Clark 		return;
801e1e9db2cSRob Clark 
802e1e9db2cSRob Clark 	vunmap(msm_obj->vaddr);
803e1e9db2cSRob Clark 	msm_obj->vaddr = NULL;
804e1e9db2cSRob Clark }
805e1e9db2cSRob Clark 
806b6295f9aSRob Clark /* must be called before _move_to_active().. */
807b6295f9aSRob Clark int msm_gem_sync_object(struct drm_gem_object *obj,
808b6295f9aSRob Clark 		struct msm_fence_context *fctx, bool exclusive)
809b6295f9aSRob Clark {
81052791eeeSChristian König 	struct dma_resv_list *fobj;
811f54d1867SChris Wilson 	struct dma_fence *fence;
812b6295f9aSRob Clark 	int i, ret;
813b6295f9aSRob Clark 
81452791eeeSChristian König 	fobj = dma_resv_get_list(obj->resv);
815b6295f9aSRob Clark 	if (!fobj || (fobj->shared_count == 0)) {
81652791eeeSChristian König 		fence = dma_resv_get_excl(obj->resv);
817b6295f9aSRob Clark 		/* don't need to wait on our own fences, since ring is fifo */
818b6295f9aSRob Clark 		if (fence && (fence->context != fctx->context)) {
819f54d1867SChris Wilson 			ret = dma_fence_wait(fence, true);
820b6295f9aSRob Clark 			if (ret)
821b6295f9aSRob Clark 				return ret;
822b6295f9aSRob Clark 		}
823b6295f9aSRob Clark 	}
824b6295f9aSRob Clark 
825b6295f9aSRob Clark 	if (!exclusive || !fobj)
826b6295f9aSRob Clark 		return 0;
827b6295f9aSRob Clark 
828b6295f9aSRob Clark 	for (i = 0; i < fobj->shared_count; i++) {
829b6295f9aSRob Clark 		fence = rcu_dereference_protected(fobj->shared[i],
83052791eeeSChristian König 						dma_resv_held(obj->resv));
831b6295f9aSRob Clark 		if (fence->context != fctx->context) {
832f54d1867SChris Wilson 			ret = dma_fence_wait(fence, true);
833b6295f9aSRob Clark 			if (ret)
834b6295f9aSRob Clark 				return ret;
835b6295f9aSRob Clark 		}
836b6295f9aSRob Clark 	}
837b6295f9aSRob Clark 
838b6295f9aSRob Clark 	return 0;
839b6295f9aSRob Clark }
840b6295f9aSRob Clark 
8419d8baa2bSAkhil P Oommen void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
8427198e6b0SRob Clark {
8437198e6b0SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
844d984457bSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
845d984457bSRob Clark 
846d984457bSRob Clark 	might_sleep();
84790643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
84890643a24SRob Clark 	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
84990643a24SRob Clark 	GEM_WARN_ON(msm_obj->dontneed);
85064fcbde7SRob Clark 	GEM_WARN_ON(!msm_obj->sgt);
8519d8baa2bSAkhil P Oommen 
852ab5c54cbSRob Clark 	if (msm_obj->active_count++ == 0) {
853d984457bSRob Clark 		mutex_lock(&priv->mm_lock);
85464fcbde7SRob Clark 		if (msm_obj->evictable)
85564fcbde7SRob Clark 			mark_unevictable(msm_obj);
856cc8a4d5aSRob Clark 		list_del(&msm_obj->mm_list);
8577198e6b0SRob Clark 		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
858d984457bSRob Clark 		mutex_unlock(&priv->mm_lock);
8597198e6b0SRob Clark 	}
8609d8baa2bSAkhil P Oommen }
8617198e6b0SRob Clark 
8629d8baa2bSAkhil P Oommen void msm_gem_active_put(struct drm_gem_object *obj)
8637198e6b0SRob Clark {
8647198e6b0SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
8657198e6b0SRob Clark 
866d984457bSRob Clark 	might_sleep();
86790643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
8687198e6b0SRob Clark 
869ab5c54cbSRob Clark 	if (--msm_obj->active_count == 0) {
8703edfa30fSRob Clark 		update_inactive(msm_obj);
8717198e6b0SRob Clark 	}
8729d8baa2bSAkhil P Oommen }
8737198e6b0SRob Clark 
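/* Re-sort the object onto the inactive list matching its current state
 * (willneed, dontneed, or unpinned) and update purgeable/evictable
 * accounting.  No-op while the object is still active; lock must be held.
 */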
8743edfa30fSRob Clark static void update_inactive(struct msm_gem_object *msm_obj)
8753edfa30fSRob Clark {
8763edfa30fSRob Clark 	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
8773edfa30fSRob Clark 
87864fcbde7SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
87964fcbde7SRob Clark 
88064fcbde7SRob Clark 	if (msm_obj->active_count != 0)
88164fcbde7SRob Clark 		return;
88264fcbde7SRob Clark 
8833edfa30fSRob Clark 	mutex_lock(&priv->mm_lock);
8843edfa30fSRob Clark 
885cc8a4d5aSRob Clark 	if (msm_obj->dontneed)
8860054eeb7SRob Clark 		mark_unpurgeable(msm_obj);
88764fcbde7SRob Clark 	if (msm_obj->evictable)
88864fcbde7SRob Clark 		mark_unevictable(msm_obj);
889cc8a4d5aSRob Clark 
890cc8a4d5aSRob Clark 	list_del(&msm_obj->mm_list);
89164fcbde7SRob Clark 	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
8923edfa30fSRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
89364fcbde7SRob Clark 		mark_evictable(msm_obj);
894cc8a4d5aSRob Clark 	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
8953edfa30fSRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
8960054eeb7SRob Clark 		mark_purgeable(msm_obj);
897cc8a4d5aSRob Clark 	} else {
89864fcbde7SRob Clark 		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
89964fcbde7SRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
900cc8a4d5aSRob Clark 	}
9013edfa30fSRob Clark 
9023edfa30fSRob Clark 	mutex_unlock(&priv->mm_lock);
9033edfa30fSRob Clark }
9043edfa30fSRob Clark 
905ba00c3f2SRob Clark int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
906ba00c3f2SRob Clark {
907b6295f9aSRob Clark 	bool write = !!(op & MSM_PREP_WRITE);
908f755e227SChris Wilson 	unsigned long remain =
909f755e227SChris Wilson 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
910f755e227SChris Wilson 	long ret;
911b6295f9aSRob Clark 
91252791eeeSChristian König 	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
913f755e227SChris Wilson 						  true,  remain);
914f755e227SChris Wilson 	if (ret == 0)
915f755e227SChris Wilson 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
916f755e227SChris Wilson 	else if (ret < 0)
917f755e227SChris Wilson 		return ret;
918ba00c3f2SRob Clark 
9197198e6b0SRob Clark 	/* TODO cache maintenance */
9207198e6b0SRob Clark 
921b6295f9aSRob Clark 	return 0;
9227198e6b0SRob Clark }
9237198e6b0SRob Clark 
9247198e6b0SRob Clark int msm_gem_cpu_fini(struct drm_gem_object *obj)
9257198e6b0SRob Clark {
9267198e6b0SRob Clark 	/* TODO cache maintenance */
927c8afe684SRob Clark 	return 0;
928c8afe684SRob Clark }
929c8afe684SRob Clark 
930c8afe684SRob Clark #ifdef CONFIG_DEBUG_FS
931f54d1867SChris Wilson static void describe_fence(struct dma_fence *fence, const char *type,
932b6295f9aSRob Clark 		struct seq_file *m)
933b6295f9aSRob Clark {
934f54d1867SChris Wilson 	if (!dma_fence_is_signaled(fence))
935a3115621SDave Airlie 		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
936b6295f9aSRob Clark 				fence->ops->get_driver_name(fence),
937b6295f9aSRob Clark 				fence->ops->get_timeline_name(fence),
938b6295f9aSRob Clark 				fence->seqno);
939b6295f9aSRob Clark }
940b6295f9aSRob Clark 
941528107c8SRob Clark void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
942528107c8SRob Clark 		struct msm_gem_stats *stats)
943c8afe684SRob Clark {
944c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
94552791eeeSChristian König 	struct dma_resv *robj = obj->resv;
94652791eeeSChristian König 	struct dma_resv_list *fobj;
947f54d1867SChris Wilson 	struct dma_fence *fence;
9484b85f7f5SRob Clark 	struct msm_gem_vma *vma;
949c8afe684SRob Clark 	uint64_t off = drm_vma_node_start(&obj->vma_node);
9504cd33c48SRob Clark 	const char *madv;
951c8afe684SRob Clark 
952a6ae74c9SRob Clark 	msm_gem_lock(obj);
953b6295f9aSRob Clark 
954528107c8SRob Clark 	stats->all.count++;
955528107c8SRob Clark 	stats->all.size += obj->size;
956528107c8SRob Clark 
957528107c8SRob Clark 	if (is_active(msm_obj)) {
958528107c8SRob Clark 		stats->active.count++;
959528107c8SRob Clark 		stats->active.size += obj->size;
960528107c8SRob Clark 	}
961528107c8SRob Clark 
962f48f3563SRob Clark 	if (msm_obj->pages) {
963f48f3563SRob Clark 		stats->resident.count++;
964f48f3563SRob Clark 		stats->resident.size += obj->size;
965f48f3563SRob Clark 	}
966f48f3563SRob Clark 
9674cd33c48SRob Clark 	switch (msm_obj->madv) {
9684cd33c48SRob Clark 	case __MSM_MADV_PURGED:
969528107c8SRob Clark 		stats->purged.count++;
970528107c8SRob Clark 		stats->purged.size += obj->size;
9714cd33c48SRob Clark 		madv = " purged";
9724cd33c48SRob Clark 		break;
9734cd33c48SRob Clark 	case MSM_MADV_DONTNEED:
9740054eeb7SRob Clark 		stats->purgeable.count++;
9750054eeb7SRob Clark 		stats->purgeable.size += obj->size;
9764cd33c48SRob Clark 		madv = " purgeable";
9774cd33c48SRob Clark 		break;
9784cd33c48SRob Clark 	case MSM_MADV_WILLNEED:
9794cd33c48SRob Clark 	default:
9804cd33c48SRob Clark 		madv = "";
9814cd33c48SRob Clark 		break;
9824cd33c48SRob Clark 	}
9834cd33c48SRob Clark 
984575f0485SJordan Crouse 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
9857198e6b0SRob Clark 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
9862c935bc5SPeter Zijlstra 			obj->name, kref_read(&obj->refcount),
987667ce33eSRob Clark 			off, msm_obj->vaddr);
988667ce33eSRob Clark 
9890815d774SJordan Crouse 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
990667ce33eSRob Clark 
991575f0485SJordan Crouse 	if (!list_empty(&msm_obj->vmas)) {
992575f0485SJordan Crouse 
993575f0485SJordan Crouse 		seq_puts(m, "      vmas:");
994575f0485SJordan Crouse 
99525faf2f2SRob Clark 		list_for_each_entry(vma, &msm_obj->vmas, list) {
99625faf2f2SRob Clark 			const char *name, *comm;
99725faf2f2SRob Clark 			if (vma->aspace) {
99825faf2f2SRob Clark 				struct msm_gem_address_space *aspace = vma->aspace;
99925faf2f2SRob Clark 				struct task_struct *task =
100025faf2f2SRob Clark 					get_pid_task(aspace->pid, PIDTYPE_PID);
100125faf2f2SRob Clark 				if (task) {
100225faf2f2SRob Clark 					comm = kstrdup(task->comm, GFP_KERNEL);
100325faf2f2SRob Clark 				} else {
100425faf2f2SRob Clark 					comm = NULL;
100525faf2f2SRob Clark 				}
100625faf2f2SRob Clark 				name = aspace->name;
100725faf2f2SRob Clark 			} else {
100825faf2f2SRob Clark 				name = comm = NULL;
100925faf2f2SRob Clark 			}
101025faf2f2SRob Clark 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
101125faf2f2SRob Clark 				name, comm ? ":" : "", comm ? comm : "",
101225faf2f2SRob Clark 				vma->aspace, vma->iova,
101325faf2f2SRob Clark 				vma->mapped ? "mapped" : "unmapped",
10147ad0e8cfSJordan Crouse 				vma->inuse);
101525faf2f2SRob Clark 			kfree(comm);
101625faf2f2SRob Clark 		}
1017575f0485SJordan Crouse 
1018575f0485SJordan Crouse 		seq_puts(m, "\n");
1019575f0485SJordan Crouse 	}
1020b6295f9aSRob Clark 
1021b6295f9aSRob Clark 	rcu_read_lock();
1022b6295f9aSRob Clark 	fobj = rcu_dereference(robj->fence);
1023b6295f9aSRob Clark 	if (fobj) {
1024b6295f9aSRob Clark 		unsigned int i, shared_count = fobj->shared_count;
1025b6295f9aSRob Clark 
1026b6295f9aSRob Clark 		for (i = 0; i < shared_count; i++) {
1027b6295f9aSRob Clark 			fence = rcu_dereference(fobj->shared[i]);
1028b6295f9aSRob Clark 			describe_fence(fence, "Shared", m);
1029b6295f9aSRob Clark 		}
1030b6295f9aSRob Clark 	}
1031b6295f9aSRob Clark 
1032b6295f9aSRob Clark 	fence = rcu_dereference(robj->fence_excl);
1033b6295f9aSRob Clark 	if (fence)
1034b6295f9aSRob Clark 		describe_fence(fence, "Exclusive", m);
1035b6295f9aSRob Clark 	rcu_read_unlock();
10360e08270aSSushmita Susheelendra 
1037a6ae74c9SRob Clark 	msm_gem_unlock(obj);
1038c8afe684SRob Clark }
1039c8afe684SRob Clark 
1040c8afe684SRob Clark void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1041c8afe684SRob Clark {
1042528107c8SRob Clark 	struct msm_gem_stats stats = {};
1043c8afe684SRob Clark 	struct msm_gem_object *msm_obj;
1044c8afe684SRob Clark 
10450815d774SJordan Crouse 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
10466ed0897cSRob Clark 	list_for_each_entry(msm_obj, list, node) {
1047c8afe684SRob Clark 		struct drm_gem_object *obj = &msm_obj->base;
1048575f0485SJordan Crouse 		seq_puts(m, "   ");
1049528107c8SRob Clark 		msm_gem_describe(obj, m, &stats);
1050c8afe684SRob Clark 	}
1051c8afe684SRob Clark 
1052528107c8SRob Clark 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1053528107c8SRob Clark 			stats.all.count, stats.all.size);
1054528107c8SRob Clark 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1055528107c8SRob Clark 			stats.active.count, stats.active.size);
1056f48f3563SRob Clark 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1057f48f3563SRob Clark 			stats.resident.count, stats.resident.size);
1058f1902c6bSColin Ian King 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
10590054eeb7SRob Clark 			stats.purgeable.count, stats.purgeable.size);
1060528107c8SRob Clark 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1061528107c8SRob Clark 			stats.purged.count, stats.purged.size);
1062c8afe684SRob Clark }
1063c8afe684SRob Clark #endif
1064c8afe684SRob Clark 
1065eecd7fd8SEmil Velikov /* don't call directly!  Use drm_gem_object_put_locked() and friends */
1066c8afe684SRob Clark void msm_gem_free_object(struct drm_gem_object *obj)
1067c8afe684SRob Clark {
1068c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
106948e7f183SKristian H. Kristensen 	struct drm_device *dev = obj->dev;
107048e7f183SKristian H. Kristensen 	struct msm_drm_private *priv = dev->dev_private;
107148e7f183SKristian H. Kristensen 
10726ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
10736ed0897cSRob Clark 	list_del(&msm_obj->node);
10746ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
10756ed0897cSRob Clark 
1076d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
1077cc8a4d5aSRob Clark 	if (msm_obj->dontneed)
10780054eeb7SRob Clark 		mark_unpurgeable(msm_obj);
1079c8afe684SRob Clark 	list_del(&msm_obj->mm_list);
1080d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
1081c8afe684SRob Clark 
1082a6ae74c9SRob Clark 	msm_gem_lock(obj);
1083c8afe684SRob Clark 
1084c8afe684SRob Clark 	/* object should not be on active list: */
108590643a24SRob Clark 	GEM_WARN_ON(is_active(msm_obj));
1086c8afe684SRob Clark 
108720d0ae2fSRob Clark 	put_iova_spaces(obj, true);
1088c8afe684SRob Clark 
108905b84911SRob Clark 	if (obj->import_attach) {
109090643a24SRob Clark 		GEM_WARN_ON(msm_obj->vaddr);
109105b84911SRob Clark 
109205b84911SRob Clark 		/* Don't drop the pages for imported dmabuf, as they are not
109305b84911SRob Clark 		 * ours, just free the array we allocated:
109405b84911SRob Clark 		 */
10952098105eSMichal Hocko 		kvfree(msm_obj->pages);
109605b84911SRob Clark 
109757f04815SRob Clark 		put_iova_vmas(obj);
109857f04815SRob Clark 
10996c0e3ea2SRob Clark 		/* dma_buf_detach() grabs resv lock, so we need to unlock
11006c0e3ea2SRob Clark 		 * prior to drm_prime_gem_destroy
11016c0e3ea2SRob Clark 		 */
11026c0e3ea2SRob Clark 		msm_gem_unlock(obj);
11036c0e3ea2SRob Clark 
1104f28730c8Sjilai wang 		drm_prime_gem_destroy(obj, msm_obj->sgt);
110505b84911SRob Clark 	} else {
1106599089c6SRob Clark 		msm_gem_vunmap(obj);
1107c8afe684SRob Clark 		put_pages(obj);
110857f04815SRob Clark 		put_iova_vmas(obj);
11096c0e3ea2SRob Clark 		msm_gem_unlock(obj);
111005b84911SRob Clark 	}
1111c8afe684SRob Clark 
1112c8afe684SRob Clark 	drm_gem_object_release(obj);
1113c8afe684SRob Clark 
1114c8afe684SRob Clark 	kfree(msm_obj);
1115c8afe684SRob Clark }
1116c8afe684SRob Clark 
1117c8afe684SRob Clark /* convenience method to construct a GEM buffer object, and userspace handle */
1118c8afe684SRob Clark int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
11190815d774SJordan Crouse 		uint32_t size, uint32_t flags, uint32_t *handle,
11200815d774SJordan Crouse 		char *name)
1121c8afe684SRob Clark {
1122c8afe684SRob Clark 	struct drm_gem_object *obj;
1123c8afe684SRob Clark 	int ret;
1124c8afe684SRob Clark 
1125c8afe684SRob Clark 	obj = msm_gem_new(dev, size, flags);
1126c8afe684SRob Clark 
1127c8afe684SRob Clark 	if (IS_ERR(obj))
1128c8afe684SRob Clark 		return PTR_ERR(obj);
1129c8afe684SRob Clark 
11300815d774SJordan Crouse 	if (name)
11310815d774SJordan Crouse 		msm_gem_object_set_name(obj, "%s", name);
11320815d774SJordan Crouse 
1133c8afe684SRob Clark 	ret = drm_gem_handle_create(file, obj, handle);
1134c8afe684SRob Clark 
1135c8afe684SRob Clark 	/* drop reference from allocate - handle holds it now */
1136f7d33950SEmil Velikov 	drm_gem_object_put(obj);
1137c8afe684SRob Clark 
1138c8afe684SRob Clark 	return ret;
1139c8afe684SRob Clark }
1140c8afe684SRob Clark 
11413c9edd9cSThomas Zimmermann static const struct vm_operations_struct vm_ops = {
11423c9edd9cSThomas Zimmermann 	.fault = msm_gem_fault,
11433c9edd9cSThomas Zimmermann 	.open = drm_gem_vm_open,
11443c9edd9cSThomas Zimmermann 	.close = drm_gem_vm_close,
11453c9edd9cSThomas Zimmermann };
11463c9edd9cSThomas Zimmermann 
11473c9edd9cSThomas Zimmermann static const struct drm_gem_object_funcs msm_gem_object_funcs = {
11483c9edd9cSThomas Zimmermann 	.free = msm_gem_free_object,
11493c9edd9cSThomas Zimmermann 	.pin = msm_gem_prime_pin,
11503c9edd9cSThomas Zimmermann 	.unpin = msm_gem_prime_unpin,
11513c9edd9cSThomas Zimmermann 	.get_sg_table = msm_gem_prime_get_sg_table,
11523c9edd9cSThomas Zimmermann 	.vmap = msm_gem_prime_vmap,
11533c9edd9cSThomas Zimmermann 	.vunmap = msm_gem_prime_vunmap,
11543c9edd9cSThomas Zimmermann 	.vm_ops = &vm_ops,
11553c9edd9cSThomas Zimmermann };
11563c9edd9cSThomas Zimmermann 
115705b84911SRob Clark static int msm_gem_new_impl(struct drm_device *dev,
115805b84911SRob Clark 		uint32_t size, uint32_t flags,
11593cbdc8d8SAkhil P Oommen 		struct drm_gem_object **obj)
1160c8afe684SRob Clark {
1161c8afe684SRob Clark 	struct msm_gem_object *msm_obj;
1162c8afe684SRob Clark 
1163c8afe684SRob Clark 	switch (flags & MSM_BO_CACHE_MASK) {
1164c8afe684SRob Clark 	case MSM_BO_UNCACHED:
1165c8afe684SRob Clark 	case MSM_BO_CACHED:
1166c8afe684SRob Clark 	case MSM_BO_WC:
1167c8afe684SRob Clark 		break;
1168c8afe684SRob Clark 	default:
11696a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
1170c8afe684SRob Clark 				(flags & MSM_BO_CACHE_MASK));
117105b84911SRob Clark 		return -EINVAL;
1172c8afe684SRob Clark 	}
1173c8afe684SRob Clark 
1174667ce33eSRob Clark 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
117505b84911SRob Clark 	if (!msm_obj)
117605b84911SRob Clark 		return -ENOMEM;
1177c8afe684SRob Clark 
1178c8afe684SRob Clark 	msm_obj->flags = flags;
11794cd33c48SRob Clark 	msm_obj->madv = MSM_MADV_WILLNEED;
1180c8afe684SRob Clark 
11817198e6b0SRob Clark 	INIT_LIST_HEAD(&msm_obj->submit_entry);
11824b85f7f5SRob Clark 	INIT_LIST_HEAD(&msm_obj->vmas);
11834b85f7f5SRob Clark 
118405b84911SRob Clark 	*obj = &msm_obj->base;
11853c9edd9cSThomas Zimmermann 	(*obj)->funcs = &msm_gem_object_funcs;
118605b84911SRob Clark 
118705b84911SRob Clark 	return 0;
118805b84911SRob Clark }
118905b84911SRob Clark 
11900e08270aSSushmita Susheelendra static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
11910e08270aSSushmita Susheelendra 		uint32_t size, uint32_t flags, bool struct_mutex_locked)
119205b84911SRob Clark {
1193f4839bd5SRob Clark 	struct msm_drm_private *priv = dev->dev_private;
11943cbdc8d8SAkhil P Oommen 	struct msm_gem_object *msm_obj;
1195871d812aSRob Clark 	struct drm_gem_object *obj = NULL;
1196f4839bd5SRob Clark 	bool use_vram = false;
119705b84911SRob Clark 	int ret;
119805b84911SRob Clark 
119905b84911SRob Clark 	size = PAGE_ALIGN(size);
120005b84911SRob Clark 
1201c2052a4eSJonathan Marek 	if (!msm_use_mmu(dev))
1202f4839bd5SRob Clark 		use_vram = true;
120386f46f25SJonathan Marek 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1204f4839bd5SRob Clark 		use_vram = true;
1205f4839bd5SRob Clark 
120690643a24SRob Clark 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1207f4839bd5SRob Clark 		return ERR_PTR(-EINVAL);
1208f4839bd5SRob Clark 
12091a5dff5dSJordan Crouse 	/* Disallow zero sized objects as they make the underlying
12101a5dff5dSJordan Crouse 	 * infrastructure grumpy
12111a5dff5dSJordan Crouse 	 */
12121a5dff5dSJordan Crouse 	if (size == 0)
12131a5dff5dSJordan Crouse 		return ERR_PTR(-EINVAL);
12141a5dff5dSJordan Crouse 
12153cbdc8d8SAkhil P Oommen 	ret = msm_gem_new_impl(dev, size, flags, &obj);
121605b84911SRob Clark 	if (ret)
121705b84911SRob Clark 		goto fail;
121805b84911SRob Clark 
12193cbdc8d8SAkhil P Oommen 	msm_obj = to_msm_bo(obj);
12203cbdc8d8SAkhil P Oommen 
1221f4839bd5SRob Clark 	if (use_vram) {
12224b85f7f5SRob Clark 		struct msm_gem_vma *vma;
1223f4839bd5SRob Clark 		struct page **pages;
1224b3949a9aSHans Verkuil 
1225a694ffedSIskren Chernev 		drm_gem_private_object_init(dev, obj, size);
1226a694ffedSIskren Chernev 
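		/*
		 * Carveout-backed objects get a single vma up front;
		 * get_pages() then reserves space in the carveout, and the
		 * iova is simply the physical address of that reservation.
		 */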
1227a6ae74c9SRob Clark 		msm_gem_lock(obj);
1228f4839bd5SRob Clark 
12294b85f7f5SRob Clark 		vma = add_vma(obj, NULL);
1230a6ae74c9SRob Clark 		msm_gem_unlock(obj);
12314b85f7f5SRob Clark 		if (IS_ERR(vma)) {
12324b85f7f5SRob Clark 			ret = PTR_ERR(vma);
12334b85f7f5SRob Clark 			goto fail;
12344b85f7f5SRob Clark 		}
12354b85f7f5SRob Clark 
12364b85f7f5SRob Clark 		to_msm_bo(obj)->vram_node = &vma->node;
12374b85f7f5SRob Clark 
123807fcad0dSIskren Chernev 		msm_gem_lock(obj);
1239f4839bd5SRob Clark 		pages = get_pages(obj);
124007fcad0dSIskren Chernev 		msm_gem_unlock(obj);
1241f4839bd5SRob Clark 		if (IS_ERR(pages)) {
1242f4839bd5SRob Clark 			ret = PTR_ERR(pages);
1243f4839bd5SRob Clark 			goto fail;
1244f4839bd5SRob Clark 		}
12454b85f7f5SRob Clark 
12464b85f7f5SRob Clark 		vma->iova = physaddr(obj);
1247f4839bd5SRob Clark 	} else {
124805b84911SRob Clark 		ret = drm_gem_object_init(dev, obj, size);
124905b84911SRob Clark 		if (ret)
125005b84911SRob Clark 			goto fail;
12510abdba47SLucas Stach 		/*
12520abdba47SLucas Stach 		 * Our buffers are kept pinned, so allocating them from the
12530abdba47SLucas Stach 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
12540abdba47SLucas Stach 		 * See comments above new_inode() for why this is required _and_
12550abdba47SLucas Stach 		 * expected if you're going to pin these pages.
12560abdba47SLucas Stach 		 */
12570abdba47SLucas Stach 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1258871d812aSRob Clark 	}
125905b84911SRob Clark 
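	/* Track the new object on the global lists: it starts out on the
	 * inactive/unpinned list, and priv->objects holds all objects.
	 */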
1260d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
126164fcbde7SRob Clark 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1262d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
12633cbdc8d8SAkhil P Oommen 
12646ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
12656ed0897cSRob Clark 	list_add_tail(&msm_obj->node, &priv->objects);
12666ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
12676ed0897cSRob Clark 
126805b84911SRob Clark 	return obj;
126905b84911SRob Clark 
127005b84911SRob Clark fail:
1271ce0a9dc0SRob Clark 	if (struct_mutex_locked) {
1272ce0a9dc0SRob Clark 		drm_gem_object_put_locked(obj);
1273ce0a9dc0SRob Clark 	} else {
1274f7d33950SEmil Velikov 		drm_gem_object_put(obj);
1275ce0a9dc0SRob Clark 	}
127605b84911SRob Clark 	return ERR_PTR(ret);
127705b84911SRob Clark }
127805b84911SRob Clark 
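/*
 * The two public constructors differ only in whether the caller already
 * holds struct_mutex, which determines which drm_gem_object_put() variant
 * is used if construction fails.
 */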
12790e08270aSSushmita Susheelendra struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
12800e08270aSSushmita Susheelendra 		uint32_t size, uint32_t flags)
12810e08270aSSushmita Susheelendra {
12820e08270aSSushmita Susheelendra 	return _msm_gem_new(dev, size, flags, true);
12830e08270aSSushmita Susheelendra }
12840e08270aSSushmita Susheelendra 
12850e08270aSSushmita Susheelendra struct drm_gem_object *msm_gem_new(struct drm_device *dev,
12860e08270aSSushmita Susheelendra 		uint32_t size, uint32_t flags)
12870e08270aSSushmita Susheelendra {
12880e08270aSSushmita Susheelendra 	return _msm_gem_new(dev, size, flags, false);
12890e08270aSSushmita Susheelendra }
12900e08270aSSushmita Susheelendra 
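/*
 * Import a dma-buf: wrap the caller-provided sg_table in a new
 * write-combined object and build a page array from it.  This requires an
 * IOMMU (see the check below).
 */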
129105b84911SRob Clark struct drm_gem_object *msm_gem_import(struct drm_device *dev,
129279f0e202SRob Clark 		struct dma_buf *dmabuf, struct sg_table *sgt)
129305b84911SRob Clark {
12943cbdc8d8SAkhil P Oommen 	struct msm_drm_private *priv = dev->dev_private;
129505b84911SRob Clark 	struct msm_gem_object *msm_obj;
129605b84911SRob Clark 	struct drm_gem_object *obj;
129779f0e202SRob Clark 	uint32_t size;
129805b84911SRob Clark 	int ret, npages;
129905b84911SRob Clark 
1300871d812aSRob Clark 	/* if we don't have an IOMMU, don't bother pretending we can import: */
1301c2052a4eSJonathan Marek 	if (!msm_use_mmu(dev)) {
13026a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1303871d812aSRob Clark 		return ERR_PTR(-EINVAL);
1304871d812aSRob Clark 	}
1305871d812aSRob Clark 
130679f0e202SRob Clark 	size = PAGE_ALIGN(dmabuf->size);
130705b84911SRob Clark 
13083cbdc8d8SAkhil P Oommen 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
130905b84911SRob Clark 	if (ret)
131005b84911SRob Clark 		goto fail;
131105b84911SRob Clark 
131205b84911SRob Clark 	drm_gem_private_object_init(dev, obj, size);
131305b84911SRob Clark 
131405b84911SRob Clark 	npages = size / PAGE_SIZE;
131505b84911SRob Clark 
131605b84911SRob Clark 	msm_obj = to_msm_bo(obj);
1317a6ae74c9SRob Clark 	msm_gem_lock(obj);
131805b84911SRob Clark 	msm_obj->sgt = sgt;
13192098105eSMichal Hocko 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
132005b84911SRob Clark 	if (!msm_obj->pages) {
1321a6ae74c9SRob Clark 		msm_gem_unlock(obj);
132205b84911SRob Clark 		ret = -ENOMEM;
132305b84911SRob Clark 		goto fail;
132405b84911SRob Clark 	}
132505b84911SRob Clark 
1326c67e6279SChristian König 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
13270e08270aSSushmita Susheelendra 	if (ret) {
1328a6ae74c9SRob Clark 		msm_gem_unlock(obj);
132905b84911SRob Clark 		goto fail;
13300e08270aSSushmita Susheelendra 	}
133105b84911SRob Clark 
1332a6ae74c9SRob Clark 	msm_gem_unlock(obj);
13333cbdc8d8SAkhil P Oommen 
1334d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
133564fcbde7SRob Clark 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1336d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
13373cbdc8d8SAkhil P Oommen 
13386ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
13396ed0897cSRob Clark 	list_add_tail(&msm_obj->node, &priv->objects);
13406ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
13416ed0897cSRob Clark 
1342c8afe684SRob Clark 	return obj;
1343c8afe684SRob Clark 
1344c8afe684SRob Clark fail:
1345f7d33950SEmil Velikov 	drm_gem_object_put(obj);
1346c8afe684SRob Clark 	return ERR_PTR(ret);
1347c8afe684SRob Clark }
13488223286dSJordan Crouse 
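/*
 * Helper for kernel-internal buffers: allocate an object, optionally pin
 * it into @aspace (returning the iova), and map it into the kernel.  On
 * any failure the partially constructed object is dropped.
 */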
13498223286dSJordan Crouse static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
13508223286dSJordan Crouse 		uint32_t flags, struct msm_gem_address_space *aspace,
13518223286dSJordan Crouse 		struct drm_gem_object **bo, uint64_t *iova, bool locked)
13528223286dSJordan Crouse {
13538223286dSJordan Crouse 	void *vaddr;
13548223286dSJordan Crouse 	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
13558223286dSJordan Crouse 	int ret;
13568223286dSJordan Crouse 
13578223286dSJordan Crouse 	if (IS_ERR(obj))
13588223286dSJordan Crouse 		return ERR_CAST(obj);
13598223286dSJordan Crouse 
13608223286dSJordan Crouse 	if (iova) {
13619fe041f6SJordan Crouse 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
136293f7abf1SJordan Crouse 		if (ret)
136393f7abf1SJordan Crouse 			goto err;
13648223286dSJordan Crouse 	}
13658223286dSJordan Crouse 
13668223286dSJordan Crouse 	vaddr = msm_gem_get_vaddr(obj);
1367c9811d0fSWei Yongjun 	if (IS_ERR(vaddr)) {
13687ad0e8cfSJordan Crouse 		msm_gem_unpin_iova(obj, aspace);
136993f7abf1SJordan Crouse 		ret = PTR_ERR(vaddr);
137093f7abf1SJordan Crouse 		goto err;
13718223286dSJordan Crouse 	}
13728223286dSJordan Crouse 
13738223286dSJordan Crouse 	if (bo)
13748223286dSJordan Crouse 		*bo = obj;
13758223286dSJordan Crouse 
13768223286dSJordan Crouse 	return vaddr;
137793f7abf1SJordan Crouse err:
137893f7abf1SJordan Crouse 	if (locked)
1379eecd7fd8SEmil Velikov 		drm_gem_object_put_locked(obj);
138093f7abf1SJordan Crouse 	else
1381f7d33950SEmil Velikov 		drm_gem_object_put(obj);
138293f7abf1SJordan Crouse 
138393f7abf1SJordan Crouse 	return ERR_PTR(ret);
13858223286dSJordan Crouse }
13868223286dSJordan Crouse 
13878223286dSJordan Crouse void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
13888223286dSJordan Crouse 		uint32_t flags, struct msm_gem_address_space *aspace,
13898223286dSJordan Crouse 		struct drm_gem_object **bo, uint64_t *iova)
13908223286dSJordan Crouse {
13918223286dSJordan Crouse 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
13928223286dSJordan Crouse }
13938223286dSJordan Crouse 
13948223286dSJordan Crouse void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
13958223286dSJordan Crouse 		uint32_t flags, struct msm_gem_address_space *aspace,
13968223286dSJordan Crouse 		struct drm_gem_object **bo, uint64_t *iova)
13978223286dSJordan Crouse {
13988223286dSJordan Crouse 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
13998223286dSJordan Crouse }
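
/*
 * Illustrative usage (not part of this file): a kernel-internal buffer is
 * typically created with msm_gem_kernel_new() and torn down with
 * msm_gem_kernel_put(), assuming the caller already has an address space
 * (e.g. the GPU's):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, aspace, false);
 */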
14001e29dff0SJordan Crouse 
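/*
 * Counterpart to msm_gem_kernel_new*(): unmap the kernel vaddr, unpin the
 * iova and drop the reference.  NULL / ERR pointers are tolerated so
 * error paths can call this unconditionally.
 */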
14011e29dff0SJordan Crouse void msm_gem_kernel_put(struct drm_gem_object *bo,
14021e29dff0SJordan Crouse 		struct msm_gem_address_space *aspace, bool locked)
14031e29dff0SJordan Crouse {
14041e29dff0SJordan Crouse 	if (IS_ERR_OR_NULL(bo))
14051e29dff0SJordan Crouse 		return;
14061e29dff0SJordan Crouse 
14071e29dff0SJordan Crouse 	msm_gem_put_vaddr(bo);
14087ad0e8cfSJordan Crouse 	msm_gem_unpin_iova(bo, aspace);
14091e29dff0SJordan Crouse 
14101e29dff0SJordan Crouse 	if (locked)
1411eecd7fd8SEmil Velikov 		drm_gem_object_put_locked(bo);
14121e29dff0SJordan Crouse 	else
1413f7d33950SEmil Velikov 		drm_gem_object_put(bo);
14141e29dff0SJordan Crouse }
14150815d774SJordan Crouse 
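/*
 * Give the object a printf-formatted debug name, e.g.
 * msm_gem_object_set_name(bo, "ring%d", id) (illustrative); the name is
 * truncated to fit msm_obj->name and shows up in debug output.
 */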
14160815d774SJordan Crouse void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
14170815d774SJordan Crouse {
14180815d774SJordan Crouse 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
14190815d774SJordan Crouse 	va_list ap;
14200815d774SJordan Crouse 
14210815d774SJordan Crouse 	if (!fmt)
14220815d774SJordan Crouse 		return;
14230815d774SJordan Crouse 
14240815d774SJordan Crouse 	va_start(ap, fmt);
14250815d774SJordan Crouse 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
14260815d774SJordan Crouse 	va_end(ap);
14270815d774SJordan Crouse }
1428