xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision e7cd5ee9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_lru(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync: this is a bit over-complicated in order to fit the
 * dma-mapping API.  Really the GPU cache is out of scope here (handled
 * on the cmdstream) and all we need to do is invalidate newly allocated
 * pages before mapping them to the CPU as uncached/writecombine.
 *
 * On top of this we have the added headache that, depending on display
 * generation, the display's iommu may be wired up to either the toplevel
 * drm device (mdss) or to the mdp sub-node, meaning that here we either
 * have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
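
/*
 * (Illustrative note, not authoritative:) the map/unmap pair above is used
 * purely for its cache-maintenance side effect on WC buffers.  Over an
 * object's life the pairing is roughly:
 *
 *	get_pages()  ->  sync_for_device()   clean/invalidate before first use
 *	put_pages()  ->  sync_for_cpu()      tear the mapping back down
 *
 * No dma address from this mapping is ever consumed; the GPU's own iommu
 * mapping is set up separately via msm_gem_map_vma().
 */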
653de433c5SRob Clark 
66871d812aSRob Clark /* allocate pages from VRAM carveout, used when no IOMMU: */
670e08270aSSushmita Susheelendra static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
68871d812aSRob Clark {
69871d812aSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
70871d812aSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
71871d812aSRob Clark 	dma_addr_t paddr;
72871d812aSRob Clark 	struct page **p;
73871d812aSRob Clark 	int ret, i;
74871d812aSRob Clark 
752098105eSMichal Hocko 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
76871d812aSRob Clark 	if (!p)
77871d812aSRob Clark 		return ERR_PTR(-ENOMEM);
78871d812aSRob Clark 
790e08270aSSushmita Susheelendra 	spin_lock(&priv->vram.lock);
804e64e553SChris Wilson 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
810e08270aSSushmita Susheelendra 	spin_unlock(&priv->vram.lock);
82871d812aSRob Clark 	if (ret) {
832098105eSMichal Hocko 		kvfree(p);
84871d812aSRob Clark 		return ERR_PTR(ret);
85871d812aSRob Clark 	}
86871d812aSRob Clark 
87871d812aSRob Clark 	paddr = physaddr(obj);
88871d812aSRob Clark 	for (i = 0; i < npages; i++) {
89b3ed524fSChristian König 		p[i] = pfn_to_page(__phys_to_pfn(paddr));
90871d812aSRob Clark 		paddr += PAGE_SIZE;
91871d812aSRob Clark 	}
92871d812aSRob Clark 
93871d812aSRob Clark 	return p;
94871d812aSRob Clark }
95c8afe684SRob Clark 
96c8afe684SRob Clark static struct page **get_pages(struct drm_gem_object *obj)
97c8afe684SRob Clark {
98c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
99c8afe684SRob Clark 
10090643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
10107fcad0dSIskren Chernev 
102c8afe684SRob Clark 	if (!msm_obj->pages) {
103c8afe684SRob Clark 		struct drm_device *dev = obj->dev;
104871d812aSRob Clark 		struct page **p;
105c8afe684SRob Clark 		int npages = obj->size >> PAGE_SHIFT;
106c8afe684SRob Clark 
107072f1f91SRob Clark 		if (use_pages(obj))
1080cdbe8acSDavid Herrmann 			p = drm_gem_get_pages(obj);
109871d812aSRob Clark 		else
110871d812aSRob Clark 			p = get_pages_vram(obj, npages);
111871d812aSRob Clark 
112c8afe684SRob Clark 		if (IS_ERR(p)) {
1136a41da17SMamta Shukla 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
114c8afe684SRob Clark 					PTR_ERR(p));
115c8afe684SRob Clark 			return p;
116c8afe684SRob Clark 		}
117c8afe684SRob Clark 
11862e3a3e3SPrakash Kamliya 		msm_obj->pages = p;
11962e3a3e3SPrakash Kamliya 
120707d561fSGerd Hoffmann 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
1211f70e079SWei Yongjun 		if (IS_ERR(msm_obj->sgt)) {
12262e3a3e3SPrakash Kamliya 			void *ptr = ERR_CAST(msm_obj->sgt);
123c8afe684SRob Clark 
1246a41da17SMamta Shukla 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
12562e3a3e3SPrakash Kamliya 			msm_obj->sgt = NULL;
12662e3a3e3SPrakash Kamliya 			return ptr;
12762e3a3e3SPrakash Kamliya 		}
128c8afe684SRob Clark 
129c8afe684SRob Clark 		/* For non-cached buffers, ensure the new pages are clean
130c8afe684SRob Clark 		 * because display controller, GPU, etc. are not coherent:
131c8afe684SRob Clark 		 */
1328b5de735SRob Clark 		if (msm_obj->flags & MSM_BO_WC)
1333de433c5SRob Clark 			sync_for_device(msm_obj);
13464fcbde7SRob Clark 
135901df24dSRob Clark 		update_lru(obj);
136c8afe684SRob Clark 	}
137c8afe684SRob Clark 
138c8afe684SRob Clark 	return msm_obj->pages;
139c8afe684SRob Clark }
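
/*
 * (Summary comment, a sketch of the behaviour above:) backing pages are
 * allocated lazily on first use -- from shmem via drm_gem_get_pages() when
 * an IOMMU is present, or from the VRAM carveout otherwise -- and cached
 * in msm_obj->pages until purge/evict/free drops them.
 */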

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	if (!IS_ERR(p)) {
		msm_obj->pin_count++;
		update_lru(obj);
	}

	return p;
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_lock(obj);
	p = msm_gem_pin_pages_locked(obj);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru(obj);
	msm_gem_unlock(obj);
}
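
/*
 * Illustrative pairing (sketch, not a real caller): external users, e.g.
 * the dma-buf export path, are expected to balance pin and unpin:
 *
 *	struct page **pages = msm_gem_pin_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	...access pages...
 *	msm_gem_unpin_pages(obj);
 */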

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}
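
/*
 * Illustrative userspace flow (sketch; how userspace obtains the offset is
 * not shown in this file): the returned fake offset is what gets passed to
 * mmap() on the drm fd:
 *
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, offset);
 *
 * Faults on that mapping then land in msm_gem_fault() above.
 */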

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping.  In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_init_vma(aspace, vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);

	update_lru(obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * Get iova and pin it.  Should have a matching put.
 * Limits iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}
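
/*
 * Example (sketch, assuming a valid address space such as gpu->aspace):
 *
 *	uint64_t iova;
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	...submit work referencing iova...
 *	msm_gem_unpin_iova(obj, aspace);
 */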

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		      struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_purge_vma(vma->aspace, vma);
	msm_gem_close_vma(vma->aspace, vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it.  Fails if the requested iova is
 * not available.  Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unpin_vma(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_lru(obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}
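
/*
 * Example (sketch): CPU access through the kernel mapping; get/put must be
 * balanced so the shrinker can eventually vunmap the object:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */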

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_lru(obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}
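
/*
 * (Illustrative note:) the madv values seen here come from userspace --
 * MSM_MADV_WILLNEED declares the backing store must be kept,
 * MSM_MADV_DONTNEED marks it purgeable -- while __MSM_MADV_PURGED is
 * kernel-internal and only set by msm_gem_purge() below.
 */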

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_lru(obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_lru(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_lru(obj);
	}
}

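/*
 * (Sketch of the bookkeeping, inferred from the code below:) an inactive
 * object sits on exactly one of three lists -- inactive_willneed
 * (evictable), inactive_dontneed (purgeable) or inactive_unpinned
 * (purged/evicted or never backed) -- and this helper re-buckets the
 * object whenever its pin/madv/vmap state changes.
 */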
static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
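
/*
 * Example (sketch): the prep/fini pair brackets CPU access from userspace;
 * a caller is expected to do roughly:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	...CPU reads/writes the buffer...
 *	msm_gem_cpu_fini(obj);
 */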

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}
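
/*
 * (Illustrative note:) VM_PFNMAP here pairs with vmf_insert_pfn() in
 * msm_gem_fault(): userspace mappings are populated one page at a time on
 * fault rather than at mmap() time, which is what lets a purged object
 * SIGBUS on the next touch instead of pinning its pages up front.
 */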

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero-sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		/* Call chain get_pages() -> update_lru() tries to
		 * access msm_obj->mm_list, but it is not initialized yet.
		 * To avoid NULL pointer dereference error, initialize
		 * mm_list to be empty.
		 */
		INIT_LIST_HEAD(&msm_obj->mm_list);

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}
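
/*
 * Example (sketch): typical kernel-internal allocation and teardown, e.g.
 * for a ringbuffer-style buffer, assuming 'aspace' is a valid GPU address
 * space:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace,
 *					 &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...
 *	msm_gem_kernel_put(bo, aspace);
 */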

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}