xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision f19ee2f3)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2c8afe684SRob Clark /*
3c8afe684SRob Clark  * Copyright (C) 2013 Red Hat
4c8afe684SRob Clark  * Author: Rob Clark <robdclark@gmail.com>
5c8afe684SRob Clark  */
6c8afe684SRob Clark 
70a0f0d8bSChristoph Hellwig #include <linux/dma-map-ops.h>
8c8afe684SRob Clark #include <linux/spinlock.h>
9c8afe684SRob Clark #include <linux/shmem_fs.h>
1005b84911SRob Clark #include <linux/dma-buf.h>
1101c8f1c4SDan Williams #include <linux/pfn_t.h>
12c8afe684SRob Clark 
13feea39a8SSam Ravnborg #include <drm/drm_prime.h>
14feea39a8SSam Ravnborg 
15c8afe684SRob Clark #include "msm_drv.h"
16fde5de6cSRob Clark #include "msm_fence.h"
17c8afe684SRob Clark #include "msm_gem.h"
187198e6b0SRob Clark #include "msm_gpu.h"
19871d812aSRob Clark #include "msm_mmu.h"
20c8afe684SRob Clark 
213edfa30fSRob Clark static void update_inactive(struct msm_gem_object *msm_obj);
220e08270aSSushmita Susheelendra 
23871d812aSRob Clark static dma_addr_t physaddr(struct drm_gem_object *obj)
24871d812aSRob Clark {
25871d812aSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
26871d812aSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
27871d812aSRob Clark 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
28871d812aSRob Clark 			priv->vram.paddr;
29871d812aSRob Clark }
30871d812aSRob Clark 
31072f1f91SRob Clark static bool use_pages(struct drm_gem_object *obj)
32072f1f91SRob Clark {
33072f1f91SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
34072f1f91SRob Clark 	return !msm_obj->vram_node;
35072f1f91SRob Clark }
36072f1f91SRob Clark 
373de433c5SRob Clark /*
383de433c5SRob Clark  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
393de433c5SRob Clark  * API.  Really GPU cache is out of scope here (handled on cmdstream)
403de433c5SRob Clark  * and all we need to do is invalidate newly allocated pages before
413de433c5SRob Clark  * mapping to CPU as uncached/writecombine.
423de433c5SRob Clark  *
433de433c5SRob Clark  * On top of this, we have the added headache that, depending on
443de433c5SRob Clark  * display generation, the display's iommu may be wired up to either
453de433c5SRob Clark  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
463de433c5SRob Clark  * that here we either have dma-direct or iommu ops.
473de433c5SRob Clark  *
483de433c5SRob Clark  * Let this be a cautionary tale of abstraction gone wrong.
493de433c5SRob Clark  */
503de433c5SRob Clark 
513de433c5SRob Clark static void sync_for_device(struct msm_gem_object *msm_obj)
523de433c5SRob Clark {
533de433c5SRob Clark 	struct device *dev = msm_obj->base.dev->dev;
543de433c5SRob Clark 
557690a33fSMarek Szyprowski 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
563de433c5SRob Clark }
573de433c5SRob Clark 
583de433c5SRob Clark static void sync_for_cpu(struct msm_gem_object *msm_obj)
593de433c5SRob Clark {
603de433c5SRob Clark 	struct device *dev = msm_obj->base.dev->dev;
613de433c5SRob Clark 
627690a33fSMarek Szyprowski 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
633de433c5SRob Clark }
643de433c5SRob Clark 
65871d812aSRob Clark /* allocate pages from VRAM carveout, used when no IOMMU: */
660e08270aSSushmita Susheelendra static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
67871d812aSRob Clark {
68871d812aSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
69871d812aSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
70871d812aSRob Clark 	dma_addr_t paddr;
71871d812aSRob Clark 	struct page **p;
72871d812aSRob Clark 	int ret, i;
73871d812aSRob Clark 
742098105eSMichal Hocko 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
75871d812aSRob Clark 	if (!p)
76871d812aSRob Clark 		return ERR_PTR(-ENOMEM);
77871d812aSRob Clark 
780e08270aSSushmita Susheelendra 	spin_lock(&priv->vram.lock);
794e64e553SChris Wilson 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
800e08270aSSushmita Susheelendra 	spin_unlock(&priv->vram.lock);
81871d812aSRob Clark 	if (ret) {
822098105eSMichal Hocko 		kvfree(p);
83871d812aSRob Clark 		return ERR_PTR(ret);
84871d812aSRob Clark 	}
85871d812aSRob Clark 
86871d812aSRob Clark 	paddr = physaddr(obj);
87871d812aSRob Clark 	for (i = 0; i < npages; i++) {
88b3ed524fSChristian König 		p[i] = pfn_to_page(__phys_to_pfn(paddr));
89871d812aSRob Clark 		paddr += PAGE_SIZE;
90871d812aSRob Clark 	}
91871d812aSRob Clark 
92871d812aSRob Clark 	return p;
93871d812aSRob Clark }
94c8afe684SRob Clark 
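/*
 * Lazily allocate and map in the backing pages for an object.  With an
 * IOMMU the pages come from shmem via drm_gem_get_pages(); otherwise they
 * come from the VRAM carveout.  Also constructs the sg_table and, for
 * WC/uncached buffers, syncs the new pages for the device.  Caller must
 * hold the object lock.
 */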
95c8afe684SRob Clark static struct page **get_pages(struct drm_gem_object *obj)
96c8afe684SRob Clark {
97c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
98c8afe684SRob Clark 
9990643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
10007fcad0dSIskren Chernev 
101c8afe684SRob Clark 	if (!msm_obj->pages) {
102c8afe684SRob Clark 		struct drm_device *dev = obj->dev;
103871d812aSRob Clark 		struct page **p;
104c8afe684SRob Clark 		int npages = obj->size >> PAGE_SHIFT;
105c8afe684SRob Clark 
106072f1f91SRob Clark 		if (use_pages(obj))
1070cdbe8acSDavid Herrmann 			p = drm_gem_get_pages(obj);
108871d812aSRob Clark 		else
109871d812aSRob Clark 			p = get_pages_vram(obj, npages);
110871d812aSRob Clark 
111c8afe684SRob Clark 		if (IS_ERR(p)) {
1126a41da17SMamta Shukla 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
113c8afe684SRob Clark 					PTR_ERR(p));
114c8afe684SRob Clark 			return p;
115c8afe684SRob Clark 		}
116c8afe684SRob Clark 
11762e3a3e3SPrakash Kamliya 		msm_obj->pages = p;
11862e3a3e3SPrakash Kamliya 
119707d561fSGerd Hoffmann 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
1201f70e079SWei Yongjun 		if (IS_ERR(msm_obj->sgt)) {
12162e3a3e3SPrakash Kamliya 			void *ptr = ERR_CAST(msm_obj->sgt);
122c8afe684SRob Clark 
1236a41da17SMamta Shukla 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
12462e3a3e3SPrakash Kamliya 			msm_obj->sgt = NULL;
12562e3a3e3SPrakash Kamliya 			return ptr;
12662e3a3e3SPrakash Kamliya 		}
127c8afe684SRob Clark 
128c8afe684SRob Clark 		/* For non-cached buffers, ensure the new pages are clean
129c8afe684SRob Clark 		 * because display controller, GPU, etc. are not coherent:
130c8afe684SRob Clark 		 */
131c8afe684SRob Clark 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
1323de433c5SRob Clark 			sync_for_device(msm_obj);
13364fcbde7SRob Clark 
13464fcbde7SRob Clark 		update_inactive(msm_obj);
135c8afe684SRob Clark 	}
136c8afe684SRob Clark 
137c8afe684SRob Clark 	return msm_obj->pages;
138c8afe684SRob Clark }
139c8afe684SRob Clark 
1400e08270aSSushmita Susheelendra static void put_pages_vram(struct drm_gem_object *obj)
1410e08270aSSushmita Susheelendra {
1420e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1430e08270aSSushmita Susheelendra 	struct msm_drm_private *priv = obj->dev->dev_private;
1440e08270aSSushmita Susheelendra 
1450e08270aSSushmita Susheelendra 	spin_lock(&priv->vram.lock);
1460e08270aSSushmita Susheelendra 	drm_mm_remove_node(msm_obj->vram_node);
1470e08270aSSushmita Susheelendra 	spin_unlock(&priv->vram.lock);
1480e08270aSSushmita Susheelendra 
1490e08270aSSushmita Susheelendra 	kvfree(msm_obj->pages);
1500e08270aSSushmita Susheelendra }
1510e08270aSSushmita Susheelendra 
152c8afe684SRob Clark static void put_pages(struct drm_gem_object *obj)
153c8afe684SRob Clark {
154c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
155c8afe684SRob Clark 
156c8afe684SRob Clark 	if (msm_obj->pages) {
1573976626eSBen Hutchings 		if (msm_obj->sgt) {
1583976626eSBen Hutchings 			/* For non-cached buffers, sync the pages back
1593976626eSBen Hutchings 			 * for the CPU before freeing, because display
1603976626eSBen Hutchings 			 * controller, GPU, etc. are not coherent:
161c8afe684SRob Clark 			 */
162c8afe684SRob Clark 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
1633de433c5SRob Clark 				sync_for_cpu(msm_obj);
16462e3a3e3SPrakash Kamliya 
165c8afe684SRob Clark 			sg_free_table(msm_obj->sgt);
166c8afe684SRob Clark 			kfree(msm_obj->sgt);
167b9a31d0dSRob Clark 			msm_obj->sgt = NULL;
1683976626eSBen Hutchings 		}
169c8afe684SRob Clark 
170072f1f91SRob Clark 		if (use_pages(obj))
171c8afe684SRob Clark 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
1720e08270aSSushmita Susheelendra 		else
1730e08270aSSushmita Susheelendra 			put_pages_vram(obj);
174871d812aSRob Clark 
175c8afe684SRob Clark 		msm_obj->pages = NULL;
176c8afe684SRob Clark 	}
177c8afe684SRob Clark }
178c8afe684SRob Clark 
17905b84911SRob Clark struct page **msm_gem_get_pages(struct drm_gem_object *obj)
18005b84911SRob Clark {
1810e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
18205b84911SRob Clark 	struct page **p;
1830e08270aSSushmita Susheelendra 
184a6ae74c9SRob Clark 	msm_gem_lock(obj);
1850e08270aSSushmita Susheelendra 
18690643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
187a6ae74c9SRob Clark 		msm_gem_unlock(obj);
1880e08270aSSushmita Susheelendra 		return ERR_PTR(-EBUSY);
1890e08270aSSushmita Susheelendra 	}
1900e08270aSSushmita Susheelendra 
19105b84911SRob Clark 	p = get_pages(obj);
19210f76165SRob Clark 
19310f76165SRob Clark 	if (!IS_ERR(p)) {
19410f76165SRob Clark 		msm_obj->pin_count++;
19510f76165SRob Clark 		update_inactive(msm_obj);
19610f76165SRob Clark 	}
19710f76165SRob Clark 
198a6ae74c9SRob Clark 	msm_gem_unlock(obj);
19905b84911SRob Clark 	return p;
20005b84911SRob Clark }
20105b84911SRob Clark 
20205b84911SRob Clark void msm_gem_put_pages(struct drm_gem_object *obj)
20305b84911SRob Clark {
20410f76165SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
20510f76165SRob Clark 
20610f76165SRob Clark 	msm_gem_lock(obj);
20710f76165SRob Clark 	msm_obj->pin_count--;
20810f76165SRob Clark 	GEM_WARN_ON(msm_obj->pin_count < 0);
20910f76165SRob Clark 	update_inactive(msm_obj);
21010f76165SRob Clark 	msm_gem_unlock(obj);
21105b84911SRob Clark }
21205b84911SRob Clark 
213af9b3547SJonathan Marek static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
214af9b3547SJonathan Marek {
2159ef36443SJonathan Marek 	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
216af9b3547SJonathan Marek 		return pgprot_writecombine(prot);
217af9b3547SJonathan Marek 	return prot;
218af9b3547SJonathan Marek }
219af9b3547SJonathan Marek 
2203c9edd9cSThomas Zimmermann static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
221c8afe684SRob Clark {
22211bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
223c8afe684SRob Clark 	struct drm_gem_object *obj = vma->vm_private_data;
2240e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
225c8afe684SRob Clark 	struct page **pages;
226c8afe684SRob Clark 	unsigned long pfn;
227c8afe684SRob Clark 	pgoff_t pgoff;
228a5f74ec7SSouptick Joarder 	int err;
229a5f74ec7SSouptick Joarder 	vm_fault_t ret;
230c8afe684SRob Clark 
2310e08270aSSushmita Susheelendra 	/*
2320e08270aSSushmita Susheelendra 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
2330e08270aSSushmita Susheelendra 	 * a reference on obj. So, we don't need to hold one here.
234d78d383aSRob Clark 	 */
235a6ae74c9SRob Clark 	err = msm_gem_lock_interruptible(obj);
236a5f74ec7SSouptick Joarder 	if (err) {
237a5f74ec7SSouptick Joarder 		ret = VM_FAULT_NOPAGE;
238c8afe684SRob Clark 		goto out;
239a5f74ec7SSouptick Joarder 	}
240c8afe684SRob Clark 
24190643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
242a6ae74c9SRob Clark 		msm_gem_unlock(obj);
2430e08270aSSushmita Susheelendra 		return VM_FAULT_SIGBUS;
2440e08270aSSushmita Susheelendra 	}
2450e08270aSSushmita Susheelendra 
246c8afe684SRob Clark 	/* make sure we have pages attached now */
247c8afe684SRob Clark 	pages = get_pages(obj);
248c8afe684SRob Clark 	if (IS_ERR(pages)) {
249a5f74ec7SSouptick Joarder 		ret = vmf_error(PTR_ERR(pages));
250c8afe684SRob Clark 		goto out_unlock;
251c8afe684SRob Clark 	}
252c8afe684SRob Clark 
253c8afe684SRob Clark 	/* We don't use vmf->pgoff since that has the fake offset: */
2541a29d85eSJan Kara 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
255c8afe684SRob Clark 
256871d812aSRob Clark 	pfn = page_to_pfn(pages[pgoff]);
257c8afe684SRob Clark 
2581a29d85eSJan Kara 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
259c8afe684SRob Clark 			pfn, pfn << PAGE_SHIFT);
260c8afe684SRob Clark 
261a5f74ec7SSouptick Joarder 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
262c8afe684SRob Clark out_unlock:
263a6ae74c9SRob Clark 	msm_gem_unlock(obj);
264c8afe684SRob Clark out:
265a5f74ec7SSouptick Joarder 	return ret;
266c8afe684SRob Clark }
267c8afe684SRob Clark 
268c8afe684SRob Clark /* get mmap offset */
269c8afe684SRob Clark static uint64_t mmap_offset(struct drm_gem_object *obj)
270c8afe684SRob Clark {
271c8afe684SRob Clark 	struct drm_device *dev = obj->dev;
272c8afe684SRob Clark 	int ret;
273c8afe684SRob Clark 
27490643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
275c8afe684SRob Clark 
276c8afe684SRob Clark 	/* Make it mmapable */
277c8afe684SRob Clark 	ret = drm_gem_create_mmap_offset(obj);
278c8afe684SRob Clark 
279c8afe684SRob Clark 	if (ret) {
2806a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
281c8afe684SRob Clark 		return 0;
282c8afe684SRob Clark 	}
283c8afe684SRob Clark 
284c8afe684SRob Clark 	return drm_vma_node_offset_addr(&obj->vma_node);
285c8afe684SRob Clark }
286c8afe684SRob Clark 
287c8afe684SRob Clark uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
288c8afe684SRob Clark {
289c8afe684SRob Clark 	uint64_t offset;
2900e08270aSSushmita Susheelendra 
291a6ae74c9SRob Clark 	msm_gem_lock(obj);
292c8afe684SRob Clark 	offset = mmap_offset(obj);
293a6ae74c9SRob Clark 	msm_gem_unlock(obj);
294c8afe684SRob Clark 	return offset;
295c8afe684SRob Clark }
296c8afe684SRob Clark 
2974b85f7f5SRob Clark static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
2984b85f7f5SRob Clark 		struct msm_gem_address_space *aspace)
2994b85f7f5SRob Clark {
3004b85f7f5SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3014b85f7f5SRob Clark 	struct msm_gem_vma *vma;
3024b85f7f5SRob Clark 
30390643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3040e08270aSSushmita Susheelendra 
3054b85f7f5SRob Clark 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
3064b85f7f5SRob Clark 	if (!vma)
3074b85f7f5SRob Clark 		return ERR_PTR(-ENOMEM);
3084b85f7f5SRob Clark 
3094b85f7f5SRob Clark 	vma->aspace = aspace;
3104b85f7f5SRob Clark 
3114b85f7f5SRob Clark 	list_add_tail(&vma->list, &msm_obj->vmas);
3124b85f7f5SRob Clark 
3134b85f7f5SRob Clark 	return vma;
3144b85f7f5SRob Clark }
3154b85f7f5SRob Clark 
3164b85f7f5SRob Clark static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
3174b85f7f5SRob Clark 		struct msm_gem_address_space *aspace)
3184b85f7f5SRob Clark {
3194b85f7f5SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3204b85f7f5SRob Clark 	struct msm_gem_vma *vma;
3214b85f7f5SRob Clark 
32290643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3234b85f7f5SRob Clark 
3244b85f7f5SRob Clark 	list_for_each_entry(vma, &msm_obj->vmas, list) {
3254b85f7f5SRob Clark 		if (vma->aspace == aspace)
3264b85f7f5SRob Clark 			return vma;
3274b85f7f5SRob Clark 	}
3284b85f7f5SRob Clark 
3294b85f7f5SRob Clark 	return NULL;
3304b85f7f5SRob Clark }
3314b85f7f5SRob Clark 
3324b85f7f5SRob Clark static void del_vma(struct msm_gem_vma *vma)
3334b85f7f5SRob Clark {
3344b85f7f5SRob Clark 	if (!vma)
3354b85f7f5SRob Clark 		return;
3364b85f7f5SRob Clark 
3374b85f7f5SRob Clark 	list_del(&vma->list);
3384b85f7f5SRob Clark 	kfree(vma);
3394b85f7f5SRob Clark }
3404b85f7f5SRob Clark 
34137c68900SLee Jones /*
34220d0ae2fSRob Clark  * If close is true, this also closes the VMA (releasing the allocated
34320d0ae2fSRob Clark  * iova range) in addition to removing the iommu mapping.  In the eviction
34420d0ae2fSRob Clark  * case (!close), we keep the iova allocated, but only remove the iommu
34520d0ae2fSRob Clark  * mapping.
34620d0ae2fSRob Clark  */
3474fe5f65eSRob Clark static void
34820d0ae2fSRob Clark put_iova_spaces(struct drm_gem_object *obj, bool close)
3494fe5f65eSRob Clark {
3504fe5f65eSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3519b73bde3SIskren Chernev 	struct msm_gem_vma *vma;
3524fe5f65eSRob Clark 
35390643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3544fe5f65eSRob Clark 
3559b73bde3SIskren Chernev 	list_for_each_entry(vma, &msm_obj->vmas, list) {
356d67f1b6dSBrian Masney 		if (vma->aspace) {
3577ad0e8cfSJordan Crouse 			msm_gem_purge_vma(vma->aspace, vma);
35820d0ae2fSRob Clark 			if (close)
3597ad0e8cfSJordan Crouse 				msm_gem_close_vma(vma->aspace, vma);
360d67f1b6dSBrian Masney 		}
3619b73bde3SIskren Chernev 	}
3629b73bde3SIskren Chernev }
3639b73bde3SIskren Chernev 
3649b73bde3SIskren Chernev /* Called with msm_obj locked */
3659b73bde3SIskren Chernev static void
3669b73bde3SIskren Chernev put_iova_vmas(struct drm_gem_object *obj)
3674fe5f65eSRob Clark {
3684fe5f65eSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3694fe5f65eSRob Clark 	struct msm_gem_vma *vma, *tmp;
3704fe5f65eSRob Clark 
37190643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3724fe5f65eSRob Clark 
3734fe5f65eSRob Clark 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
3744b85f7f5SRob Clark 		del_vma(vma);
3754fe5f65eSRob Clark 	}
3764fe5f65eSRob Clark }
3774fe5f65eSRob Clark 
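/*
 * Look up (or create) the vma for @aspace and reserve an iova range for it.
 * This only allocates the iova; it does not pin pages or program the iommu.
 * Caller must hold the object lock.
 */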
3788117e5e5SRob Clark static int get_iova_locked(struct drm_gem_object *obj,
379d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova,
380d3b8877eSJonathan Marek 		u64 range_start, u64 range_end)
381c8afe684SRob Clark {
3824b85f7f5SRob Clark 	struct msm_gem_vma *vma;
383c8afe684SRob Clark 	int ret = 0;
384c8afe684SRob Clark 
38590643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
386cb1e3818SRob Clark 
3874b85f7f5SRob Clark 	vma = lookup_vma(obj, aspace);
388871d812aSRob Clark 
3894b85f7f5SRob Clark 	if (!vma) {
3904b85f7f5SRob Clark 		vma = add_vma(obj, aspace);
391c0ee9794SJordan Crouse 		if (IS_ERR(vma))
392c0ee9794SJordan Crouse 			return PTR_ERR(vma);
3934b85f7f5SRob Clark 
394d3b8877eSJonathan Marek 		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
395d3b8877eSJonathan Marek 			range_start, range_end);
396c0ee9794SJordan Crouse 		if (ret) {
397c0ee9794SJordan Crouse 			del_vma(vma);
398c0ee9794SJordan Crouse 			return ret;
399c8afe684SRob Clark 		}
4004b85f7f5SRob Clark 	}
4014b85f7f5SRob Clark 
4024b85f7f5SRob Clark 	*iova = vma->iova;
4034b85f7f5SRob Clark 	return 0;
404c0ee9794SJordan Crouse }
4054b85f7f5SRob Clark 
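/*
 * Pin the backing pages and map them into the vma for @aspace, translating
 * the BO flags (GPU_READONLY, MAP_PRIV, CACHED_COHERENT) into iommu prot
 * bits.  Expects the vma to already exist (see get_iova_locked()) and the
 * object lock to be held.
 */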
406c0ee9794SJordan Crouse static int msm_gem_pin_iova(struct drm_gem_object *obj,
407c0ee9794SJordan Crouse 		struct msm_gem_address_space *aspace)
408c0ee9794SJordan Crouse {
409c0ee9794SJordan Crouse 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
410c0ee9794SJordan Crouse 	struct msm_gem_vma *vma;
411c0ee9794SJordan Crouse 	struct page **pages;
41264fcbde7SRob Clark 	int ret, prot = IOMMU_READ;
413bbc2cd07SRob Clark 
414bbc2cd07SRob Clark 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
415bbc2cd07SRob Clark 		prot |= IOMMU_WRITE;
416c0ee9794SJordan Crouse 
4170b462d7aSJonathan Marek 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
4180b462d7aSJonathan Marek 		prot |= IOMMU_PRIV;
4190b462d7aSJonathan Marek 
420d12e3390SJonathan Marek 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
421d12e3390SJonathan Marek 		prot |= IOMMU_CACHE;
422d12e3390SJonathan Marek 
42390643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
424c0ee9794SJordan Crouse 
42590643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
426c0ee9794SJordan Crouse 		return -EBUSY;
427c0ee9794SJordan Crouse 
428c0ee9794SJordan Crouse 	vma = lookup_vma(obj, aspace);
42990643a24SRob Clark 	if (GEM_WARN_ON(!vma))
430c0ee9794SJordan Crouse 		return -EINVAL;
431c0ee9794SJordan Crouse 
432c0ee9794SJordan Crouse 	pages = get_pages(obj);
433c0ee9794SJordan Crouse 	if (IS_ERR(pages))
434c0ee9794SJordan Crouse 		return PTR_ERR(pages);
435c0ee9794SJordan Crouse 
43664fcbde7SRob Clark 	ret = msm_gem_map_vma(aspace, vma, prot,
437bbc2cd07SRob Clark 			msm_obj->sgt, obj->size >> PAGE_SHIFT);
43864fcbde7SRob Clark 
43964fcbde7SRob Clark 	if (!ret)
44064fcbde7SRob Clark 		msm_obj->pin_count++;
44164fcbde7SRob Clark 
44264fcbde7SRob Clark 	return ret;
443c0ee9794SJordan Crouse }
444c0ee9794SJordan Crouse 
445e4b87d22SRob Clark static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
446d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova,
447d3b8877eSJonathan Marek 		u64 range_start, u64 range_end)
448c0ee9794SJordan Crouse {
449c0ee9794SJordan Crouse 	u64 local;
450c0ee9794SJordan Crouse 	int ret;
451c0ee9794SJordan Crouse 
45290643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
453c0ee9794SJordan Crouse 
4548117e5e5SRob Clark 	ret = get_iova_locked(obj, aspace, &local,
455d3b8877eSJonathan Marek 		range_start, range_end);
456c0ee9794SJordan Crouse 
457c0ee9794SJordan Crouse 	if (!ret)
458c0ee9794SJordan Crouse 		ret = msm_gem_pin_iova(obj, aspace);
459c0ee9794SJordan Crouse 
460c0ee9794SJordan Crouse 	if (!ret)
461c0ee9794SJordan Crouse 		*iova = local;
462c0ee9794SJordan Crouse 
463c8afe684SRob Clark 	return ret;
464c8afe684SRob Clark }
465c8afe684SRob Clark 
466e4b87d22SRob Clark /*
467e4b87d22SRob Clark  * Get the iova and pin it. Should have a matching put.
468e4b87d22SRob Clark  * Limits the iova to the specified range (in pages).
469e4b87d22SRob Clark  */
470e4b87d22SRob Clark int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
471e4b87d22SRob Clark 		struct msm_gem_address_space *aspace, uint64_t *iova,
472e4b87d22SRob Clark 		u64 range_start, u64 range_end)
473e4b87d22SRob Clark {
474e4b87d22SRob Clark 	int ret;
475e4b87d22SRob Clark 
476e4b87d22SRob Clark 	msm_gem_lock(obj);
477e4b87d22SRob Clark 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
478e4b87d22SRob Clark 	msm_gem_unlock(obj);
479e4b87d22SRob Clark 
480e4b87d22SRob Clark 	return ret;
481e4b87d22SRob Clark }
482e4b87d22SRob Clark 
483e4b87d22SRob Clark int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
484e4b87d22SRob Clark 		struct msm_gem_address_space *aspace, uint64_t *iova)
485e4b87d22SRob Clark {
486e4b87d22SRob Clark 	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
487e4b87d22SRob Clark }
488e4b87d22SRob Clark 
489d3b8877eSJonathan Marek /* get iova and pin it. Should have a matching put */
490d3b8877eSJonathan Marek int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
491d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova)
492d3b8877eSJonathan Marek {
493d3b8877eSJonathan Marek 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
494d3b8877eSJonathan Marek }
495d3b8877eSJonathan Marek 
4967ad0e8cfSJordan Crouse /*
4977ad0e8cfSJordan Crouse  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
4987ad0e8cfSJordan Crouse  * valid for the life of the object
4997ad0e8cfSJordan Crouse  */
5009fe041f6SJordan Crouse int msm_gem_get_iova(struct drm_gem_object *obj,
5019fe041f6SJordan Crouse 		struct msm_gem_address_space *aspace, uint64_t *iova)
5029fe041f6SJordan Crouse {
5039fe041f6SJordan Crouse 	int ret;
5049fe041f6SJordan Crouse 
505a6ae74c9SRob Clark 	msm_gem_lock(obj);
5068117e5e5SRob Clark 	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
507a6ae74c9SRob Clark 	msm_gem_unlock(obj);
5089fe041f6SJordan Crouse 
5099fe041f6SJordan Crouse 	return ret;
5109fe041f6SJordan Crouse }
5119fe041f6SJordan Crouse 
5122638d90aSRob Clark /* get iova without taking a reference, used in places where you have
5139fe041f6SJordan Crouse  * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
5142638d90aSRob Clark  */
5158bdcd949SRob Clark uint64_t msm_gem_iova(struct drm_gem_object *obj,
5168bdcd949SRob Clark 		struct msm_gem_address_space *aspace)
5172638d90aSRob Clark {
5184b85f7f5SRob Clark 	struct msm_gem_vma *vma;
5194b85f7f5SRob Clark 
520a6ae74c9SRob Clark 	msm_gem_lock(obj);
5214b85f7f5SRob Clark 	vma = lookup_vma(obj, aspace);
522a6ae74c9SRob Clark 	msm_gem_unlock(obj);
52390643a24SRob Clark 	GEM_WARN_ON(!vma);
5244b85f7f5SRob Clark 
5254b85f7f5SRob Clark 	return vma ? vma->iova : 0;
5262638d90aSRob Clark }
5272638d90aSRob Clark 
5287ad0e8cfSJordan Crouse /*
529e4b87d22SRob Clark  * Locked variant of msm_gem_unpin_iova()
530e4b87d22SRob Clark  */
531e4b87d22SRob Clark void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
532e4b87d22SRob Clark 		struct msm_gem_address_space *aspace)
533e4b87d22SRob Clark {
53464fcbde7SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
535e4b87d22SRob Clark 	struct msm_gem_vma *vma;
536e4b87d22SRob Clark 
53790643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
538e4b87d22SRob Clark 
539e4b87d22SRob Clark 	vma = lookup_vma(obj, aspace);
540e4b87d22SRob Clark 
54164fcbde7SRob Clark 	if (!GEM_WARN_ON(!vma)) {
542e4b87d22SRob Clark 		msm_gem_unmap_vma(aspace, vma);
54364fcbde7SRob Clark 
54464fcbde7SRob Clark 		msm_obj->pin_count--;
54564fcbde7SRob Clark 		GEM_WARN_ON(msm_obj->pin_count < 0);
54664fcbde7SRob Clark 
54764fcbde7SRob Clark 		update_inactive(msm_obj);
54864fcbde7SRob Clark 	}
549e4b87d22SRob Clark }
550e4b87d22SRob Clark 
551e4b87d22SRob Clark /*
5527ad0e8cfSJordan Crouse  * Unpin an iova by updating the reference counts. The memory isn't actually
5537ad0e8cfSJordan Crouse  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
5547ad0e8cfSJordan Crouse  * to get rid of it
5557ad0e8cfSJordan Crouse  */
5567ad0e8cfSJordan Crouse void msm_gem_unpin_iova(struct drm_gem_object *obj,
5578bdcd949SRob Clark 		struct msm_gem_address_space *aspace)
558c8afe684SRob Clark {
559a6ae74c9SRob Clark 	msm_gem_lock(obj);
560e4b87d22SRob Clark 	msm_gem_unpin_iova_locked(obj, aspace);
561a6ae74c9SRob Clark 	msm_gem_unlock(obj);
562c8afe684SRob Clark }
563c8afe684SRob Clark 
564c8afe684SRob Clark int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
565c8afe684SRob Clark 		struct drm_mode_create_dumb *args)
566c8afe684SRob Clark {
567c8afe684SRob Clark 	args->pitch = align_pitch(args->width, args->bpp);
568c8afe684SRob Clark 	args->size  = PAGE_ALIGN(args->pitch * args->height);
569c8afe684SRob Clark 	return msm_gem_new_handle(dev, file, args->size,
5700815d774SJordan Crouse 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
571c8afe684SRob Clark }
572c8afe684SRob Clark 
573c8afe684SRob Clark int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
574c8afe684SRob Clark 		uint32_t handle, uint64_t *offset)
575c8afe684SRob Clark {
576c8afe684SRob Clark 	struct drm_gem_object *obj;
577c8afe684SRob Clark 	int ret = 0;
578c8afe684SRob Clark 
579c8afe684SRob Clark 	/* GEM does all our handle to object mapping */
580a8ad0bd8SChris Wilson 	obj = drm_gem_object_lookup(file, handle);
581c8afe684SRob Clark 	if (obj == NULL) {
582c8afe684SRob Clark 		ret = -ENOENT;
583c8afe684SRob Clark 		goto fail;
584c8afe684SRob Clark 	}
585c8afe684SRob Clark 
586c8afe684SRob Clark 	*offset = msm_gem_mmap_offset(obj);
587c8afe684SRob Clark 
588f7d33950SEmil Velikov 	drm_gem_object_put(obj);
589c8afe684SRob Clark 
590c8afe684SRob Clark fail:
591c8afe684SRob Clark 	return ret;
592c8afe684SRob Clark }
593c8afe684SRob Clark 
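/*
 * Map the object into the kernel address space, bumping vmap_count.  @madv
 * is the worst madvise state the caller will accept (normally WILLNEED;
 * __MSM_MADV_PURGED only for the GPU-hang dump path below).
 */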
594fad33f4bSRob Clark static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
595c8afe684SRob Clark {
5960e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
5970e08270aSSushmita Susheelendra 	int ret = 0;
5980e08270aSSushmita Susheelendra 
59990643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
600e4b87d22SRob Clark 
6018b6b7d84SDaniel Vetter 	if (obj->import_attach)
6028b6b7d84SDaniel Vetter 		return ERR_PTR(-ENODEV);
6038b6b7d84SDaniel Vetter 
60490643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv > madv)) {
6056a41da17SMamta Shukla 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
606fad33f4bSRob Clark 			msm_obj->madv, madv);
6070e08270aSSushmita Susheelendra 		return ERR_PTR(-EBUSY);
608c8afe684SRob Clark 	}
609c8afe684SRob Clark 
6100e08270aSSushmita Susheelendra 	/* increment vmap_count *before* vmap() call, so shrinker can
611a6ae74c9SRob Clark 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
6120e08270aSSushmita Susheelendra 	 * This guarantees that we won't try to msm_gem_vunmap() this
6130e08270aSSushmita Susheelendra 	 * same object from within the vmap() call (while we already
614a6ae74c9SRob Clark 	 * hold msm_obj lock)
6150e08270aSSushmita Susheelendra 	 */
6160e08270aSSushmita Susheelendra 	msm_obj->vmap_count++;
6170e08270aSSushmita Susheelendra 
6180e08270aSSushmita Susheelendra 	if (!msm_obj->vaddr) {
6190e08270aSSushmita Susheelendra 		struct page **pages = get_pages(obj);
6200e08270aSSushmita Susheelendra 		if (IS_ERR(pages)) {
6210e08270aSSushmita Susheelendra 			ret = PTR_ERR(pages);
6220e08270aSSushmita Susheelendra 			goto fail;
6230e08270aSSushmita Susheelendra 		}
6240e08270aSSushmita Susheelendra 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
625af9b3547SJonathan Marek 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
6260e08270aSSushmita Susheelendra 		if (msm_obj->vaddr == NULL) {
6270e08270aSSushmita Susheelendra 			ret = -ENOMEM;
6280e08270aSSushmita Susheelendra 			goto fail;
6290e08270aSSushmita Susheelendra 		}
63010f76165SRob Clark 
63110f76165SRob Clark 		update_inactive(msm_obj);
6320e08270aSSushmita Susheelendra 	}
6330e08270aSSushmita Susheelendra 
6340e08270aSSushmita Susheelendra 	return msm_obj->vaddr;
6350e08270aSSushmita Susheelendra 
6360e08270aSSushmita Susheelendra fail:
637e1e9db2cSRob Clark 	msm_obj->vmap_count--;
6380e08270aSSushmita Susheelendra 	return ERR_PTR(ret);
63918f23049SRob Clark }
64018f23049SRob Clark 
641e4b87d22SRob Clark void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
642e4b87d22SRob Clark {
643e4b87d22SRob Clark 	return get_vaddr(obj, MSM_MADV_WILLNEED);
644e4b87d22SRob Clark }
645e4b87d22SRob Clark 
646fad33f4bSRob Clark void *msm_gem_get_vaddr(struct drm_gem_object *obj)
647fad33f4bSRob Clark {
648e4b87d22SRob Clark 	void *ret;
649e4b87d22SRob Clark 
650e4b87d22SRob Clark 	msm_gem_lock(obj);
651e4b87d22SRob Clark 	ret = msm_gem_get_vaddr_locked(obj);
652e4b87d22SRob Clark 	msm_gem_unlock(obj);
653e4b87d22SRob Clark 
654e4b87d22SRob Clark 	return ret;
655fad33f4bSRob Clark }
656fad33f4bSRob Clark 
657fad33f4bSRob Clark /*
658fad33f4bSRob Clark  * Don't use this!  It is for the very special case of dumping
659fad33f4bSRob Clark  * submits from GPU hangs or faults, where the bo may already
660fad33f4bSRob Clark  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
661fad33f4bSRob Clark  * active list.
662fad33f4bSRob Clark  */
663fad33f4bSRob Clark void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
664fad33f4bSRob Clark {
665fad33f4bSRob Clark 	return get_vaddr(obj, __MSM_MADV_PURGED);
666fad33f4bSRob Clark }
667fad33f4bSRob Clark 
668e4b87d22SRob Clark void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
66918f23049SRob Clark {
6700e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
6710e08270aSSushmita Susheelendra 
67290643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
67390643a24SRob Clark 	GEM_WARN_ON(msm_obj->vmap_count < 1);
674e4b87d22SRob Clark 
6750e08270aSSushmita Susheelendra 	msm_obj->vmap_count--;
6764cd33c48SRob Clark }
6770e08270aSSushmita Susheelendra 
6780e08270aSSushmita Susheelendra void msm_gem_put_vaddr(struct drm_gem_object *obj)
6794cd33c48SRob Clark {
680a6ae74c9SRob Clark 	msm_gem_lock(obj);
681e4b87d22SRob Clark 	msm_gem_put_vaddr_locked(obj);
682a6ae74c9SRob Clark 	msm_gem_unlock(obj);
6834cd33c48SRob Clark }
6844cd33c48SRob Clark 
6854cd33c48SRob Clark /* Update madvise status, returns true if not purged, else
6864cd33c48SRob Clark  * false or -errno.
6874cd33c48SRob Clark  */
6884cd33c48SRob Clark int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
6894cd33c48SRob Clark {
6904cd33c48SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
69168209390SRob Clark 
692a6ae74c9SRob Clark 	msm_gem_lock(obj);
693c8afe684SRob Clark 
694c8afe684SRob Clark 	if (msm_obj->madv != __MSM_MADV_PURGED)
695c8afe684SRob Clark 		msm_obj->madv = madv;
696c8afe684SRob Clark 
6970e08270aSSushmita Susheelendra 	madv = msm_obj->madv;
6980e08270aSSushmita Susheelendra 
6993edfa30fSRob Clark 	/* If the obj is inactive, we might need to move it
7003edfa30fSRob Clark 	 * between inactive lists
7013edfa30fSRob Clark 	 */
7023edfa30fSRob Clark 	if (msm_obj->active_count == 0)
7033edfa30fSRob Clark 		update_inactive(msm_obj);
7043edfa30fSRob Clark 
705a6ae74c9SRob Clark 	msm_gem_unlock(obj);
7060e08270aSSushmita Susheelendra 
7070e08270aSSushmita Susheelendra 	return (madv != __MSM_MADV_PURGED);
708c8afe684SRob Clark }
709c8afe684SRob Clark 
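/*
 * Reclaim the backing storage of a MSM_MADV_DONTNEED object: tear down the
 * iommu mappings and iova allocations, vunmap it, release the pages and
 * truncate the shmem backing store so the memory really goes back to the
 * system.  Called with the object locked, typically from the shrinker.
 */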
710599089c6SRob Clark void msm_gem_purge(struct drm_gem_object *obj)
71168209390SRob Clark {
71268209390SRob Clark 	struct drm_device *dev = obj->dev;
71368209390SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
71468209390SRob Clark 
71581d4d597SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
71690643a24SRob Clark 	GEM_WARN_ON(!is_purgeable(msm_obj));
71768209390SRob Clark 
71820d0ae2fSRob Clark 	/* Get rid of any iommu mapping(s): */
71920d0ae2fSRob Clark 	put_iova_spaces(obj, true);
7200e08270aSSushmita Susheelendra 
721599089c6SRob Clark 	msm_gem_vunmap(obj);
72268209390SRob Clark 
72381d4d597SRob Clark 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
72481d4d597SRob Clark 
72568209390SRob Clark 	put_pages(obj);
72668209390SRob Clark 
7279b73bde3SIskren Chernev 	put_iova_vmas(obj);
7289b73bde3SIskren Chernev 
72968209390SRob Clark 	msm_obj->madv = __MSM_MADV_PURGED;
73025ed38b3SRob Clark 	update_inactive(msm_obj);
73168209390SRob Clark 
73268209390SRob Clark 	drm_gem_free_mmap_offset(obj);
73368209390SRob Clark 
73468209390SRob Clark 	/* Our goal here is to return as much of the memory as
73568209390SRob Clark 	 * possible back to the system, as we are called from OOM.
73668209390SRob Clark 	 * To do this we must instruct the shmfs to drop all of its
73768209390SRob Clark 	 * backing pages, *now*.
73868209390SRob Clark 	 */
73968209390SRob Clark 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
74068209390SRob Clark 
74168209390SRob Clark 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
74268209390SRob Clark 			0, (loff_t)-1);
74368209390SRob Clark }
74468209390SRob Clark 
74537c68900SLee Jones /*
74663f17ef8SRob Clark  * Unpin the backing pages and make them available to be swapped out.
74763f17ef8SRob Clark  */
74863f17ef8SRob Clark void msm_gem_evict(struct drm_gem_object *obj)
74963f17ef8SRob Clark {
75063f17ef8SRob Clark 	struct drm_device *dev = obj->dev;
75163f17ef8SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
75263f17ef8SRob Clark 
75363f17ef8SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
75463f17ef8SRob Clark 	GEM_WARN_ON(is_unevictable(msm_obj));
75563f17ef8SRob Clark 	GEM_WARN_ON(!msm_obj->evictable);
75663f17ef8SRob Clark 	GEM_WARN_ON(msm_obj->active_count);
75763f17ef8SRob Clark 
75863f17ef8SRob Clark 	/* Get rid of any iommu mapping(s): */
75963f17ef8SRob Clark 	put_iova_spaces(obj, false);
76063f17ef8SRob Clark 
76163f17ef8SRob Clark 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
76263f17ef8SRob Clark 
76363f17ef8SRob Clark 	put_pages(obj);
76463f17ef8SRob Clark 
76563f17ef8SRob Clark 	update_inactive(msm_obj);
76663f17ef8SRob Clark }
76763f17ef8SRob Clark 
768599089c6SRob Clark void msm_gem_vunmap(struct drm_gem_object *obj)
769e1e9db2cSRob Clark {
770e1e9db2cSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
771e1e9db2cSRob Clark 
77290643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
7730e08270aSSushmita Susheelendra 
77490643a24SRob Clark 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
775e1e9db2cSRob Clark 		return;
776e1e9db2cSRob Clark 
777e1e9db2cSRob Clark 	vunmap(msm_obj->vaddr);
778e1e9db2cSRob Clark 	msm_obj->vaddr = NULL;
779e1e9db2cSRob Clark }
780e1e9db2cSRob Clark 
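/*
 * Active tracking: the first active_get moves the object onto the GPU's
 * active_list (marking it unevictable), the last active_put drops it back
 * onto the appropriate inactive list via update_inactive().
 */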
7819d8baa2bSAkhil P Oommen void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
7827198e6b0SRob Clark {
7837198e6b0SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
784d984457bSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
785d984457bSRob Clark 
786d984457bSRob Clark 	might_sleep();
78790643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
78890643a24SRob Clark 	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
78990643a24SRob Clark 	GEM_WARN_ON(msm_obj->dontneed);
7909d8baa2bSAkhil P Oommen 
791ab5c54cbSRob Clark 	if (msm_obj->active_count++ == 0) {
792d984457bSRob Clark 		mutex_lock(&priv->mm_lock);
79364fcbde7SRob Clark 		if (msm_obj->evictable)
79464fcbde7SRob Clark 			mark_unevictable(msm_obj);
795a83cc4fbSBaokun Li 		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
796d984457bSRob Clark 		mutex_unlock(&priv->mm_lock);
7977198e6b0SRob Clark 	}
7989d8baa2bSAkhil P Oommen }
7997198e6b0SRob Clark 
8009d8baa2bSAkhil P Oommen void msm_gem_active_put(struct drm_gem_object *obj)
8017198e6b0SRob Clark {
8027198e6b0SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
8037198e6b0SRob Clark 
804d984457bSRob Clark 	might_sleep();
80590643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
8067198e6b0SRob Clark 
807ab5c54cbSRob Clark 	if (--msm_obj->active_count == 0) {
8083edfa30fSRob Clark 		update_inactive(msm_obj);
8097198e6b0SRob Clark 	}
8109d8baa2bSAkhil P Oommen }
8117198e6b0SRob Clark 
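/*
 * Re-sort an inactive object onto the right inactive list based on its
 * madvise state and whether it still has backing pages: willneed objects
 * with pages are marked evictable, dontneed objects are marked purgeable,
 * and everything else lands on the unpinned list.  No-op while the object
 * is still active.  Caller must hold the object lock.
 */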
8123edfa30fSRob Clark static void update_inactive(struct msm_gem_object *msm_obj)
8133edfa30fSRob Clark {
8143edfa30fSRob Clark 	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
8153edfa30fSRob Clark 
81664fcbde7SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
81764fcbde7SRob Clark 
81864fcbde7SRob Clark 	if (msm_obj->active_count != 0)
81964fcbde7SRob Clark 		return;
82064fcbde7SRob Clark 
8213edfa30fSRob Clark 	mutex_lock(&priv->mm_lock);
8223edfa30fSRob Clark 
823cc8a4d5aSRob Clark 	if (msm_obj->dontneed)
8240054eeb7SRob Clark 		mark_unpurgeable(msm_obj);
82564fcbde7SRob Clark 	if (msm_obj->evictable)
82664fcbde7SRob Clark 		mark_unevictable(msm_obj);
827cc8a4d5aSRob Clark 
828cc8a4d5aSRob Clark 	list_del(&msm_obj->mm_list);
82964fcbde7SRob Clark 	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
8303edfa30fSRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
83164fcbde7SRob Clark 		mark_evictable(msm_obj);
832cc8a4d5aSRob Clark 	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
8333edfa30fSRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
8340054eeb7SRob Clark 		mark_purgeable(msm_obj);
835cc8a4d5aSRob Clark 	} else {
83664fcbde7SRob Clark 		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
83764fcbde7SRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
838cc8a4d5aSRob Clark 	}
8393edfa30fSRob Clark 
8403edfa30fSRob Clark 	mutex_unlock(&priv->mm_lock);
8413edfa30fSRob Clark }
8423edfa30fSRob Clark 
843ba00c3f2SRob Clark int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
844ba00c3f2SRob Clark {
845b6295f9aSRob Clark 	bool write = !!(op & MSM_PREP_WRITE);
846f755e227SChris Wilson 	unsigned long remain =
847f755e227SChris Wilson 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
848f755e227SChris Wilson 	long ret;
849b6295f9aSRob Clark 
850d3fae3b3SChristian König 	ret = dma_resv_wait_timeout(obj->resv, write, true,  remain);
851f755e227SChris Wilson 	if (ret == 0)
852f755e227SChris Wilson 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
853f755e227SChris Wilson 	else if (ret < 0)
854f755e227SChris Wilson 		return ret;
855ba00c3f2SRob Clark 
8567198e6b0SRob Clark 	/* TODO cache maintenance */
8577198e6b0SRob Clark 
858b6295f9aSRob Clark 	return 0;
8597198e6b0SRob Clark }
8607198e6b0SRob Clark 
8617198e6b0SRob Clark int msm_gem_cpu_fini(struct drm_gem_object *obj)
8627198e6b0SRob Clark {
8637198e6b0SRob Clark 	/* TODO cache maintenance */
864c8afe684SRob Clark 	return 0;
865c8afe684SRob Clark }
866c8afe684SRob Clark 
867c8afe684SRob Clark #ifdef CONFIG_DEBUG_FS
868528107c8SRob Clark void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
869528107c8SRob Clark 		struct msm_gem_stats *stats)
870c8afe684SRob Clark {
871c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
87252791eeeSChristian König 	struct dma_resv *robj = obj->resv;
8734b85f7f5SRob Clark 	struct msm_gem_vma *vma;
874c8afe684SRob Clark 	uint64_t off = drm_vma_node_start(&obj->vma_node);
8754cd33c48SRob Clark 	const char *madv;
876c8afe684SRob Clark 
877a6ae74c9SRob Clark 	msm_gem_lock(obj);
878b6295f9aSRob Clark 
879528107c8SRob Clark 	stats->all.count++;
880528107c8SRob Clark 	stats->all.size += obj->size;
881528107c8SRob Clark 
882528107c8SRob Clark 	if (is_active(msm_obj)) {
883528107c8SRob Clark 		stats->active.count++;
884528107c8SRob Clark 		stats->active.size += obj->size;
885528107c8SRob Clark 	}
886528107c8SRob Clark 
887f48f3563SRob Clark 	if (msm_obj->pages) {
888f48f3563SRob Clark 		stats->resident.count++;
889f48f3563SRob Clark 		stats->resident.size += obj->size;
890f48f3563SRob Clark 	}
891f48f3563SRob Clark 
8924cd33c48SRob Clark 	switch (msm_obj->madv) {
8934cd33c48SRob Clark 	case __MSM_MADV_PURGED:
894528107c8SRob Clark 		stats->purged.count++;
895528107c8SRob Clark 		stats->purged.size += obj->size;
8964cd33c48SRob Clark 		madv = " purged";
8974cd33c48SRob Clark 		break;
8984cd33c48SRob Clark 	case MSM_MADV_DONTNEED:
8990054eeb7SRob Clark 		stats->purgeable.count++;
9000054eeb7SRob Clark 		stats->purgeable.size += obj->size;
9014cd33c48SRob Clark 		madv = " purgeable";
9024cd33c48SRob Clark 		break;
9034cd33c48SRob Clark 	case MSM_MADV_WILLNEED:
9044cd33c48SRob Clark 	default:
9054cd33c48SRob Clark 		madv = "";
9064cd33c48SRob Clark 		break;
9074cd33c48SRob Clark 	}
9084cd33c48SRob Clark 
909575f0485SJordan Crouse 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
9107198e6b0SRob Clark 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
9112c935bc5SPeter Zijlstra 			obj->name, kref_read(&obj->refcount),
912667ce33eSRob Clark 			off, msm_obj->vaddr);
913667ce33eSRob Clark 
9140815d774SJordan Crouse 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
915667ce33eSRob Clark 
916575f0485SJordan Crouse 	if (!list_empty(&msm_obj->vmas)) {
917575f0485SJordan Crouse 
918575f0485SJordan Crouse 		seq_puts(m, "      vmas:");
919575f0485SJordan Crouse 
92025faf2f2SRob Clark 		list_for_each_entry(vma, &msm_obj->vmas, list) {
92125faf2f2SRob Clark 			const char *name, *comm;
92225faf2f2SRob Clark 			if (vma->aspace) {
92325faf2f2SRob Clark 				struct msm_gem_address_space *aspace = vma->aspace;
92425faf2f2SRob Clark 				struct task_struct *task =
92525faf2f2SRob Clark 					get_pid_task(aspace->pid, PIDTYPE_PID);
92625faf2f2SRob Clark 				if (task) {
92725faf2f2SRob Clark 					comm = kstrdup(task->comm, GFP_KERNEL);
92825faf2f2SRob Clark 				} else {
92925faf2f2SRob Clark 					comm = NULL;
93025faf2f2SRob Clark 				}
93125faf2f2SRob Clark 				name = aspace->name;
93225faf2f2SRob Clark 			} else {
93325faf2f2SRob Clark 				name = comm = NULL;
93425faf2f2SRob Clark 			}
93525faf2f2SRob Clark 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
93625faf2f2SRob Clark 				name, comm ? ":" : "", comm ? comm : "",
93725faf2f2SRob Clark 				vma->aspace, vma->iova,
93825faf2f2SRob Clark 				vma->mapped ? "mapped" : "unmapped",
9397ad0e8cfSJordan Crouse 				vma->inuse);
94025faf2f2SRob Clark 			kfree(comm);
94125faf2f2SRob Clark 		}
942575f0485SJordan Crouse 
943575f0485SJordan Crouse 		seq_puts(m, "\n");
944575f0485SJordan Crouse 	}
945b6295f9aSRob Clark 
946*f19ee2f3SChristian König 	dma_resv_describe(robj, m);
947a6ae74c9SRob Clark 	msm_gem_unlock(obj);
948c8afe684SRob Clark }
949c8afe684SRob Clark 
950c8afe684SRob Clark void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
951c8afe684SRob Clark {
952528107c8SRob Clark 	struct msm_gem_stats stats = {};
953c8afe684SRob Clark 	struct msm_gem_object *msm_obj;
954c8afe684SRob Clark 
9550815d774SJordan Crouse 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
9566ed0897cSRob Clark 	list_for_each_entry(msm_obj, list, node) {
957c8afe684SRob Clark 		struct drm_gem_object *obj = &msm_obj->base;
958575f0485SJordan Crouse 		seq_puts(m, "   ");
959528107c8SRob Clark 		msm_gem_describe(obj, m, &stats);
960c8afe684SRob Clark 	}
961c8afe684SRob Clark 
962528107c8SRob Clark 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
963528107c8SRob Clark 			stats.all.count, stats.all.size);
964528107c8SRob Clark 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
965528107c8SRob Clark 			stats.active.count, stats.active.size);
966f48f3563SRob Clark 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
967f48f3563SRob Clark 			stats.resident.count, stats.resident.size);
968f1902c6bSColin Ian King 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
9690054eeb7SRob Clark 			stats.purgeable.count, stats.purgeable.size);
970528107c8SRob Clark 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
971528107c8SRob Clark 			stats.purged.count, stats.purged.size);
972c8afe684SRob Clark }
973c8afe684SRob Clark #endif
974c8afe684SRob Clark 
975030af2b0SRob Clark /* don't call directly!  Use drm_gem_object_put() */
976c8afe684SRob Clark void msm_gem_free_object(struct drm_gem_object *obj)
977c8afe684SRob Clark {
978c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
97948e7f183SKristian H. Kristensen 	struct drm_device *dev = obj->dev;
98048e7f183SKristian H. Kristensen 	struct msm_drm_private *priv = dev->dev_private;
98148e7f183SKristian H. Kristensen 
9826ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
9836ed0897cSRob Clark 	list_del(&msm_obj->node);
9846ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
9856ed0897cSRob Clark 
986d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
987cc8a4d5aSRob Clark 	if (msm_obj->dontneed)
9880054eeb7SRob Clark 		mark_unpurgeable(msm_obj);
989c8afe684SRob Clark 	list_del(&msm_obj->mm_list);
990d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
991c8afe684SRob Clark 
992a6ae74c9SRob Clark 	msm_gem_lock(obj);
993c8afe684SRob Clark 
994c8afe684SRob Clark 	/* object should not be on active list: */
99590643a24SRob Clark 	GEM_WARN_ON(is_active(msm_obj));
996c8afe684SRob Clark 
99720d0ae2fSRob Clark 	put_iova_spaces(obj, true);
998c8afe684SRob Clark 
99905b84911SRob Clark 	if (obj->import_attach) {
100090643a24SRob Clark 		GEM_WARN_ON(msm_obj->vaddr);
100105b84911SRob Clark 
100205b84911SRob Clark 		/* Don't drop the pages for imported dmabuf, as they are not
100305b84911SRob Clark 		 * ours, just free the array we allocated:
100405b84911SRob Clark 		 */
10052098105eSMichal Hocko 		kvfree(msm_obj->pages);
100605b84911SRob Clark 
100757f04815SRob Clark 		put_iova_vmas(obj);
100857f04815SRob Clark 
10096c0e3ea2SRob Clark 		/* dma_buf_detach() grabs resv lock, so we need to unlock
10106c0e3ea2SRob Clark 		 * prior to drm_prime_gem_destroy
10116c0e3ea2SRob Clark 		 */
10126c0e3ea2SRob Clark 		msm_gem_unlock(obj);
10136c0e3ea2SRob Clark 
1014f28730c8Sjilai wang 		drm_prime_gem_destroy(obj, msm_obj->sgt);
101505b84911SRob Clark 	} else {
1016599089c6SRob Clark 		msm_gem_vunmap(obj);
1017c8afe684SRob Clark 		put_pages(obj);
101857f04815SRob Clark 		put_iova_vmas(obj);
10196c0e3ea2SRob Clark 		msm_gem_unlock(obj);
102005b84911SRob Clark 	}
1021c8afe684SRob Clark 
1022c8afe684SRob Clark 	drm_gem_object_release(obj);
1023c8afe684SRob Clark 
1024c8afe684SRob Clark 	kfree(msm_obj);
1025c8afe684SRob Clark }
1026c8afe684SRob Clark 
1027510410bfSThomas Zimmermann static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1028510410bfSThomas Zimmermann {
1029510410bfSThomas Zimmermann 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1030510410bfSThomas Zimmermann 
1031510410bfSThomas Zimmermann 	vma->vm_flags &= ~VM_PFNMAP;
1032510410bfSThomas Zimmermann 	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
1033510410bfSThomas Zimmermann 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1034510410bfSThomas Zimmermann 
1035510410bfSThomas Zimmermann 	return 0;
1036510410bfSThomas Zimmermann }
1037510410bfSThomas Zimmermann 
1038c8afe684SRob Clark /* convenience method to construct a GEM buffer object, and userspace handle */
1039c8afe684SRob Clark int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
10400815d774SJordan Crouse 		uint32_t size, uint32_t flags, uint32_t *handle,
10410815d774SJordan Crouse 		char *name)
1042c8afe684SRob Clark {
1043c8afe684SRob Clark 	struct drm_gem_object *obj;
1044c8afe684SRob Clark 	int ret;
1045c8afe684SRob Clark 
1046c8afe684SRob Clark 	obj = msm_gem_new(dev, size, flags);
1047c8afe684SRob Clark 
1048c8afe684SRob Clark 	if (IS_ERR(obj))
1049c8afe684SRob Clark 		return PTR_ERR(obj);
1050c8afe684SRob Clark 
10510815d774SJordan Crouse 	if (name)
10520815d774SJordan Crouse 		msm_gem_object_set_name(obj, "%s", name);
10530815d774SJordan Crouse 
1054c8afe684SRob Clark 	ret = drm_gem_handle_create(file, obj, handle);
1055c8afe684SRob Clark 
1056c8afe684SRob Clark 	/* drop reference from allocate - handle holds it now */
1057f7d33950SEmil Velikov 	drm_gem_object_put(obj);
1058c8afe684SRob Clark 
1059c8afe684SRob Clark 	return ret;
1060c8afe684SRob Clark }
1061c8afe684SRob Clark 
10623c9edd9cSThomas Zimmermann static const struct vm_operations_struct vm_ops = {
10633c9edd9cSThomas Zimmermann 	.fault = msm_gem_fault,
10643c9edd9cSThomas Zimmermann 	.open = drm_gem_vm_open,
10653c9edd9cSThomas Zimmermann 	.close = drm_gem_vm_close,
10663c9edd9cSThomas Zimmermann };
10673c9edd9cSThomas Zimmermann 
10683c9edd9cSThomas Zimmermann static const struct drm_gem_object_funcs msm_gem_object_funcs = {
10693c9edd9cSThomas Zimmermann 	.free = msm_gem_free_object,
10703c9edd9cSThomas Zimmermann 	.pin = msm_gem_prime_pin,
10713c9edd9cSThomas Zimmermann 	.unpin = msm_gem_prime_unpin,
10723c9edd9cSThomas Zimmermann 	.get_sg_table = msm_gem_prime_get_sg_table,
10733c9edd9cSThomas Zimmermann 	.vmap = msm_gem_prime_vmap,
10743c9edd9cSThomas Zimmermann 	.vunmap = msm_gem_prime_vunmap,
1075510410bfSThomas Zimmermann 	.mmap = msm_gem_object_mmap,
10763c9edd9cSThomas Zimmermann 	.vm_ops = &vm_ops,
10773c9edd9cSThomas Zimmermann };
10783c9edd9cSThomas Zimmermann 
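/*
 * Common allocation path shared by msm_gem_new() and msm_gem_import():
 * validates the cache flags and sets up the msm_gem_object itself, but
 * does not create any backing storage.
 */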
107905b84911SRob Clark static int msm_gem_new_impl(struct drm_device *dev,
108005b84911SRob Clark 		uint32_t size, uint32_t flags,
10813cbdc8d8SAkhil P Oommen 		struct drm_gem_object **obj)
1082c8afe684SRob Clark {
1083d12e3390SJonathan Marek 	struct msm_drm_private *priv = dev->dev_private;
1084c8afe684SRob Clark 	struct msm_gem_object *msm_obj;
1085c8afe684SRob Clark 
1086c8afe684SRob Clark 	switch (flags & MSM_BO_CACHE_MASK) {
1087c8afe684SRob Clark 	case MSM_BO_UNCACHED:
1088c8afe684SRob Clark 	case MSM_BO_CACHED:
1089c8afe684SRob Clark 	case MSM_BO_WC:
1090c8afe684SRob Clark 		break;
1091d12e3390SJonathan Marek 	case MSM_BO_CACHED_COHERENT:
1092d12e3390SJonathan Marek 		if (priv->has_cached_coherent)
1093d12e3390SJonathan Marek 			break;
1094e181ad43SGustavo A. R. Silva 		fallthrough;
1095c8afe684SRob Clark 	default:
10966a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
1097c8afe684SRob Clark 				(flags & MSM_BO_CACHE_MASK));
109805b84911SRob Clark 		return -EINVAL;
1099c8afe684SRob Clark 	}
1100c8afe684SRob Clark 
1101667ce33eSRob Clark 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
110205b84911SRob Clark 	if (!msm_obj)
110305b84911SRob Clark 		return -ENOMEM;
1104c8afe684SRob Clark 
1105c8afe684SRob Clark 	msm_obj->flags = flags;
11064cd33c48SRob Clark 	msm_obj->madv = MSM_MADV_WILLNEED;
1107c8afe684SRob Clark 
11084b85f7f5SRob Clark 	INIT_LIST_HEAD(&msm_obj->vmas);
11094b85f7f5SRob Clark 
111005b84911SRob Clark 	*obj = &msm_obj->base;
11113c9edd9cSThomas Zimmermann 	(*obj)->funcs = &msm_gem_object_funcs;
111205b84911SRob Clark 
111305b84911SRob Clark 	return 0;
111405b84911SRob Clark }
111505b84911SRob Clark 
1116030af2b0SRob Clark struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
111705b84911SRob Clark {
1118f4839bd5SRob Clark 	struct msm_drm_private *priv = dev->dev_private;
11193cbdc8d8SAkhil P Oommen 	struct msm_gem_object *msm_obj;
1120871d812aSRob Clark 	struct drm_gem_object *obj = NULL;
1121f4839bd5SRob Clark 	bool use_vram = false;
112205b84911SRob Clark 	int ret;
112305b84911SRob Clark 
112405b84911SRob Clark 	size = PAGE_ALIGN(size);
112505b84911SRob Clark 
1126c2052a4eSJonathan Marek 	if (!msm_use_mmu(dev))
1127f4839bd5SRob Clark 		use_vram = true;
112886f46f25SJonathan Marek 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1129f4839bd5SRob Clark 		use_vram = true;
1130f4839bd5SRob Clark 
113190643a24SRob Clark 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1132f4839bd5SRob Clark 		return ERR_PTR(-EINVAL);
1133f4839bd5SRob Clark 
11341a5dff5dSJordan Crouse 	/* Disallow zero sized objects as they make the underlying
11351a5dff5dSJordan Crouse 	 * infrastructure grumpy
11361a5dff5dSJordan Crouse 	 */
11371a5dff5dSJordan Crouse 	if (size == 0)
11381a5dff5dSJordan Crouse 		return ERR_PTR(-EINVAL);
11391a5dff5dSJordan Crouse 
11403cbdc8d8SAkhil P Oommen 	ret = msm_gem_new_impl(dev, size, flags, &obj);
114105b84911SRob Clark 	if (ret)
114205b84911SRob Clark 		goto fail;
114305b84911SRob Clark 
11443cbdc8d8SAkhil P Oommen 	msm_obj = to_msm_bo(obj);
11453cbdc8d8SAkhil P Oommen 
1146f4839bd5SRob Clark 	if (use_vram) {
11474b85f7f5SRob Clark 		struct msm_gem_vma *vma;
1148f4839bd5SRob Clark 		struct page **pages;
1149b3949a9aSHans Verkuil 
1150a694ffedSIskren Chernev 		drm_gem_private_object_init(dev, obj, size);
1151a694ffedSIskren Chernev 
1152a6ae74c9SRob Clark 		msm_gem_lock(obj);
1153f4839bd5SRob Clark 
11544b85f7f5SRob Clark 		vma = add_vma(obj, NULL);
1155a6ae74c9SRob Clark 		msm_gem_unlock(obj);
11564b85f7f5SRob Clark 		if (IS_ERR(vma)) {
11574b85f7f5SRob Clark 			ret = PTR_ERR(vma);
11584b85f7f5SRob Clark 			goto fail;
11594b85f7f5SRob Clark 		}
11604b85f7f5SRob Clark 
11614b85f7f5SRob Clark 		to_msm_bo(obj)->vram_node = &vma->node;
11624b85f7f5SRob Clark 
116345f56690SAlexey Minnekhanov 		/* Call chain get_pages() -> update_inactive() tries to
116445f56690SAlexey Minnekhanov 		 * access msm_obj->mm_list, but it is not initialized yet.
116545f56690SAlexey Minnekhanov 		 * To avoid NULL pointer dereference error, initialize
116645f56690SAlexey Minnekhanov 		 * To avoid a NULL pointer dereference, initialize
116745f56690SAlexey Minnekhanov 		 */
116845f56690SAlexey Minnekhanov 		INIT_LIST_HEAD(&msm_obj->mm_list);
116945f56690SAlexey Minnekhanov 
117007fcad0dSIskren Chernev 		msm_gem_lock(obj);
1171f4839bd5SRob Clark 		pages = get_pages(obj);
117207fcad0dSIskren Chernev 		msm_gem_unlock(obj);
1173f4839bd5SRob Clark 		if (IS_ERR(pages)) {
1174f4839bd5SRob Clark 			ret = PTR_ERR(pages);
1175f4839bd5SRob Clark 			goto fail;
1176f4839bd5SRob Clark 		}
11774b85f7f5SRob Clark 
11784b85f7f5SRob Clark 		vma->iova = physaddr(obj);
1179f4839bd5SRob Clark 	} else {
118005b84911SRob Clark 		ret = drm_gem_object_init(dev, obj, size);
118105b84911SRob Clark 		if (ret)
118205b84911SRob Clark 			goto fail;
11830abdba47SLucas Stach 		/*
11840abdba47SLucas Stach 		 * Our buffers are kept pinned, so allocating them from the
11850abdba47SLucas Stach 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
11860abdba47SLucas Stach 		 * See comments above new_inode() why this is required _and_
11870abdba47SLucas Stach 		 * expected if you're going to pin these pages.
11880abdba47SLucas Stach 		 */
11890abdba47SLucas Stach 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1190871d812aSRob Clark 	}
119105b84911SRob Clark 
1192d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
119364fcbde7SRob Clark 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1194d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
11953cbdc8d8SAkhil P Oommen 
11966ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
11976ed0897cSRob Clark 	list_add_tail(&msm_obj->node, &priv->objects);
11986ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
11996ed0897cSRob Clark 
120005b84911SRob Clark 	return obj;
120105b84911SRob Clark 
120205b84911SRob Clark fail:
1203f7d33950SEmil Velikov 	drm_gem_object_put(obj);
120405b84911SRob Clark 	return ERR_PTR(ret);
120505b84911SRob Clark }
120605b84911SRob Clark 
120705b84911SRob Clark struct drm_gem_object *msm_gem_import(struct drm_device *dev,
120879f0e202SRob Clark 		struct dma_buf *dmabuf, struct sg_table *sgt)
120905b84911SRob Clark {
12103cbdc8d8SAkhil P Oommen 	struct msm_drm_private *priv = dev->dev_private;
121105b84911SRob Clark 	struct msm_gem_object *msm_obj;
121205b84911SRob Clark 	struct drm_gem_object *obj;
121379f0e202SRob Clark 	uint32_t size;
121405b84911SRob Clark 	int ret, npages;
121505b84911SRob Clark 
1216871d812aSRob Clark 	/* if we don't have IOMMU, don't bother pretending we can import: */
1217c2052a4eSJonathan Marek 	if (!msm_use_mmu(dev)) {
12186a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1219871d812aSRob Clark 		return ERR_PTR(-EINVAL);
1220871d812aSRob Clark 	}
1221871d812aSRob Clark 
122279f0e202SRob Clark 	size = PAGE_ALIGN(dmabuf->size);
122305b84911SRob Clark 
12243cbdc8d8SAkhil P Oommen 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
122505b84911SRob Clark 	if (ret)
122605b84911SRob Clark 		goto fail;
122705b84911SRob Clark 
122805b84911SRob Clark 	drm_gem_private_object_init(dev, obj, size);
122905b84911SRob Clark 
123005b84911SRob Clark 	npages = size / PAGE_SIZE;
123105b84911SRob Clark 
123205b84911SRob Clark 	msm_obj = to_msm_bo(obj);
1233a6ae74c9SRob Clark 	msm_gem_lock(obj);
123405b84911SRob Clark 	msm_obj->sgt = sgt;
12352098105eSMichal Hocko 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
123605b84911SRob Clark 	if (!msm_obj->pages) {
1237a6ae74c9SRob Clark 		msm_gem_unlock(obj);
123805b84911SRob Clark 		ret = -ENOMEM;
123905b84911SRob Clark 		goto fail;
124005b84911SRob Clark 	}
124105b84911SRob Clark 
1242c67e6279SChristian König 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
12430e08270aSSushmita Susheelendra 	if (ret) {
1244a6ae74c9SRob Clark 		msm_gem_unlock(obj);
124505b84911SRob Clark 		goto fail;
12460e08270aSSushmita Susheelendra 	}
124705b84911SRob Clark 
1248a6ae74c9SRob Clark 	msm_gem_unlock(obj);
12493cbdc8d8SAkhil P Oommen 
1250d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
125164fcbde7SRob Clark 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1252d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
12533cbdc8d8SAkhil P Oommen 
12546ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
12556ed0897cSRob Clark 	list_add_tail(&msm_obj->node, &priv->objects);
12566ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
12576ed0897cSRob Clark 
1258c8afe684SRob Clark 	return obj;
1259c8afe684SRob Clark 
1260c8afe684SRob Clark fail:
1261f7d33950SEmil Velikov 	drm_gem_object_put(obj);
1262c8afe684SRob Clark 	return ERR_PTR(ret);
1263c8afe684SRob Clark }
12648223286dSJordan Crouse 
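/*
 * Convenience helper for kernel-internal buffers: allocate a GEM object,
 * optionally pin an iova in @aspace, and return a kernel mapping.  A rough
 * usage sketch (error handling trimmed, names illustrative only):
 *
 *	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... use ptr / iova ...
 *	msm_gem_kernel_put(bo, aspace);
 */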
1265030af2b0SRob Clark void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
12668223286dSJordan Crouse 		uint32_t flags, struct msm_gem_address_space *aspace,
1267030af2b0SRob Clark 		struct drm_gem_object **bo, uint64_t *iova)
12688223286dSJordan Crouse {
12698223286dSJordan Crouse 	void *vaddr;
1270030af2b0SRob Clark 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
12718223286dSJordan Crouse 	int ret;
12728223286dSJordan Crouse 
12738223286dSJordan Crouse 	if (IS_ERR(obj))
12748223286dSJordan Crouse 		return ERR_CAST(obj);
12758223286dSJordan Crouse 
12768223286dSJordan Crouse 	if (iova) {
12779fe041f6SJordan Crouse 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
127893f7abf1SJordan Crouse 		if (ret)
127993f7abf1SJordan Crouse 			goto err;
12808223286dSJordan Crouse 	}
12818223286dSJordan Crouse 
12828223286dSJordan Crouse 	vaddr = msm_gem_get_vaddr(obj);
1283c9811d0fSWei Yongjun 	if (IS_ERR(vaddr)) {
12847ad0e8cfSJordan Crouse 		msm_gem_unpin_iova(obj, aspace);
128593f7abf1SJordan Crouse 		ret = PTR_ERR(vaddr);
128693f7abf1SJordan Crouse 		goto err;
12878223286dSJordan Crouse 	}
12888223286dSJordan Crouse 
12898223286dSJordan Crouse 	if (bo)
12908223286dSJordan Crouse 		*bo = obj;
12918223286dSJordan Crouse 
12928223286dSJordan Crouse 	return vaddr;
129393f7abf1SJordan Crouse err:
1294f7d33950SEmil Velikov 	drm_gem_object_put(obj);
129593f7abf1SJordan Crouse 
129693f7abf1SJordan Crouse 	return ERR_PTR(ret);
129793f7abf1SJordan Crouse 
12988223286dSJordan Crouse }
12998223286dSJordan Crouse 
13001e29dff0SJordan Crouse void msm_gem_kernel_put(struct drm_gem_object *bo,
1301030af2b0SRob Clark 		struct msm_gem_address_space *aspace)
13021e29dff0SJordan Crouse {
13031e29dff0SJordan Crouse 	if (IS_ERR_OR_NULL(bo))
13041e29dff0SJordan Crouse 		return;
13051e29dff0SJordan Crouse 
13061e29dff0SJordan Crouse 	msm_gem_put_vaddr(bo);
13077ad0e8cfSJordan Crouse 	msm_gem_unpin_iova(bo, aspace);
1308f7d33950SEmil Velikov 	drm_gem_object_put(bo);
13091e29dff0SJordan Crouse }
13100815d774SJordan Crouse 
13110815d774SJordan Crouse void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
13120815d774SJordan Crouse {
13130815d774SJordan Crouse 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
13140815d774SJordan Crouse 	va_list ap;
13150815d774SJordan Crouse 
13160815d774SJordan Crouse 	if (!fmt)
13170815d774SJordan Crouse 		return;
13180815d774SJordan Crouse 
13190815d774SJordan Crouse 	va_start(ap, fmt);
13200815d774SJordan Crouse 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
13210815d774SJordan Crouse 	va_end(ap);
13220815d774SJordan Crouse }
1323