xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision 522f1abf)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2c8afe684SRob Clark /*
3c8afe684SRob Clark  * Copyright (C) 2013 Red Hat
4c8afe684SRob Clark  * Author: Rob Clark <robdclark@gmail.com>
5c8afe684SRob Clark  */
6c8afe684SRob Clark 
70a0f0d8bSChristoph Hellwig #include <linux/dma-map-ops.h>
8f8546caaSYanteng Si #include <linux/vmalloc.h>
9c8afe684SRob Clark #include <linux/spinlock.h>
10c8afe684SRob Clark #include <linux/shmem_fs.h>
1105b84911SRob Clark #include <linux/dma-buf.h>
1201c8f1c4SDan Williams #include <linux/pfn_t.h>
13c8afe684SRob Clark 
14feea39a8SSam Ravnborg #include <drm/drm_prime.h>
15feea39a8SSam Ravnborg 
16c8afe684SRob Clark #include "msm_drv.h"
17fde5de6cSRob Clark #include "msm_fence.h"
18c8afe684SRob Clark #include "msm_gem.h"
197198e6b0SRob Clark #include "msm_gpu.h"
20871d812aSRob Clark #include "msm_mmu.h"
21c8afe684SRob Clark 
223edfa30fSRob Clark static void update_inactive(struct msm_gem_object *msm_obj);
230e08270aSSushmita Susheelendra 
24871d812aSRob Clark static dma_addr_t physaddr(struct drm_gem_object *obj)
25871d812aSRob Clark {
26871d812aSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
27871d812aSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
28871d812aSRob Clark 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
29871d812aSRob Clark 			priv->vram.paddr;
30871d812aSRob Clark }
31871d812aSRob Clark 
32072f1f91SRob Clark static bool use_pages(struct drm_gem_object *obj)
33072f1f91SRob Clark {
34072f1f91SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
35072f1f91SRob Clark 	return !msm_obj->vram_node;
36072f1f91SRob Clark }
37072f1f91SRob Clark 
383de433c5SRob Clark /*
393de433c5SRob Clark  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
403de433c5SRob Clark  * API.  Really GPU cache is out of scope here (handled on cmdstream)
413de433c5SRob Clark  * and all we need to do is invalidate newly allocated pages before
423de433c5SRob Clark  * mapping to CPU as uncached/writecombine.
433de433c5SRob Clark  *
443de433c5SRob Clark  * On top of this, we have the added headache, that depending on
453de433c5SRob Clark  * display generation, the display's iommu may be wired up to either
463de433c5SRob Clark  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
473de433c5SRob Clark  * that here we either have dma-direct or iommu ops.
483de433c5SRob Clark  *
493de433c5SRob Clark  * Let this be a cautionary tale of abstraction gone wrong.
503de433c5SRob Clark  */
513de433c5SRob Clark 
523de433c5SRob Clark static void sync_for_device(struct msm_gem_object *msm_obj)
533de433c5SRob Clark {
543de433c5SRob Clark 	struct device *dev = msm_obj->base.dev->dev;
553de433c5SRob Clark 
567690a33fSMarek Szyprowski 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
573de433c5SRob Clark }
583de433c5SRob Clark 
593de433c5SRob Clark static void sync_for_cpu(struct msm_gem_object *msm_obj)
603de433c5SRob Clark {
613de433c5SRob Clark 	struct device *dev = msm_obj->base.dev->dev;
623de433c5SRob Clark 
637690a33fSMarek Szyprowski 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
643de433c5SRob Clark }
653de433c5SRob Clark 
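/*
 * Illustrative sketch (not part of this file): for WC/uncached objects the
 * two helpers above bracket the lifetime of the backing pages, mirroring
 * what get_pages()/put_pages() below actually do:
 *
 *    p = drm_gem_get_pages(obj);
 *    ...
 *    sync_for_device(msm_obj);   // clean CPU caches before uncached/WC use
 *    ...                         // object in use by GPU/display
 *    sync_for_cpu(msm_obj);      // tear down the dma mapping before freeing
 *    drm_gem_put_pages(obj, p, true, false);
 */
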
66871d812aSRob Clark /* allocate pages from VRAM carveout, used when no IOMMU: */
670e08270aSSushmita Susheelendra static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
68871d812aSRob Clark {
69871d812aSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
70871d812aSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
71871d812aSRob Clark 	dma_addr_t paddr;
72871d812aSRob Clark 	struct page **p;
73871d812aSRob Clark 	int ret, i;
74871d812aSRob Clark 
752098105eSMichal Hocko 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
76871d812aSRob Clark 	if (!p)
77871d812aSRob Clark 		return ERR_PTR(-ENOMEM);
78871d812aSRob Clark 
790e08270aSSushmita Susheelendra 	spin_lock(&priv->vram.lock);
804e64e553SChris Wilson 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
810e08270aSSushmita Susheelendra 	spin_unlock(&priv->vram.lock);
82871d812aSRob Clark 	if (ret) {
832098105eSMichal Hocko 		kvfree(p);
84871d812aSRob Clark 		return ERR_PTR(ret);
85871d812aSRob Clark 	}
86871d812aSRob Clark 
87871d812aSRob Clark 	paddr = physaddr(obj);
88871d812aSRob Clark 	for (i = 0; i < npages; i++) {
89b3ed524fSChristian König 		p[i] = pfn_to_page(__phys_to_pfn(paddr));
90871d812aSRob Clark 		paddr += PAGE_SIZE;
91871d812aSRob Clark 	}
92871d812aSRob Clark 
93871d812aSRob Clark 	return p;
94871d812aSRob Clark }
95c8afe684SRob Clark 
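/*
 * Worked example (illustrative, assuming 4K pages): with a carveout at
 * priv->vram.paddr = 0x80000000 and a drm_mm node whose allocation starts
 * at page 16, physaddr() returns 0x80000000 + (16 << 12) = 0x80010000, and
 * the loop above fills p[] with the struct page for each successive 4K step
 * from there.
 */
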
96c8afe684SRob Clark static struct page **get_pages(struct drm_gem_object *obj)
97c8afe684SRob Clark {
98c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
99c8afe684SRob Clark 
10090643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
10107fcad0dSIskren Chernev 
102c8afe684SRob Clark 	if (!msm_obj->pages) {
103c8afe684SRob Clark 		struct drm_device *dev = obj->dev;
104871d812aSRob Clark 		struct page **p;
105c8afe684SRob Clark 		int npages = obj->size >> PAGE_SHIFT;
106c8afe684SRob Clark 
107072f1f91SRob Clark 		if (use_pages(obj))
1080cdbe8acSDavid Herrmann 			p = drm_gem_get_pages(obj);
109871d812aSRob Clark 		else
110871d812aSRob Clark 			p = get_pages_vram(obj, npages);
111871d812aSRob Clark 
112c8afe684SRob Clark 		if (IS_ERR(p)) {
1136a41da17SMamta Shukla 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
114c8afe684SRob Clark 					PTR_ERR(p));
115c8afe684SRob Clark 			return p;
116c8afe684SRob Clark 		}
117c8afe684SRob Clark 
11862e3a3e3SPrakash Kamliya 		msm_obj->pages = p;
11962e3a3e3SPrakash Kamliya 
120707d561fSGerd Hoffmann 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
1211f70e079SWei Yongjun 		if (IS_ERR(msm_obj->sgt)) {
12262e3a3e3SPrakash Kamliya 			void *ptr = ERR_CAST(msm_obj->sgt);
123c8afe684SRob Clark 
1246a41da17SMamta Shukla 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
12562e3a3e3SPrakash Kamliya 			msm_obj->sgt = NULL;
12662e3a3e3SPrakash Kamliya 			return ptr;
12762e3a3e3SPrakash Kamliya 		}
128c8afe684SRob Clark 
129c8afe684SRob Clark 		/* For non-cached buffers, ensure the new pages are clean
130c8afe684SRob Clark 		 * because display controller, GPU, etc. are not coherent:
131c8afe684SRob Clark 		 */
132c8afe684SRob Clark 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
1333de433c5SRob Clark 			sync_for_device(msm_obj);
13464fcbde7SRob Clark 
13564fcbde7SRob Clark 		update_inactive(msm_obj);
136c8afe684SRob Clark 	}
137c8afe684SRob Clark 
138c8afe684SRob Clark 	return msm_obj->pages;
139c8afe684SRob Clark }
140c8afe684SRob Clark 
1410e08270aSSushmita Susheelendra static void put_pages_vram(struct drm_gem_object *obj)
1420e08270aSSushmita Susheelendra {
1430e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1440e08270aSSushmita Susheelendra 	struct msm_drm_private *priv = obj->dev->dev_private;
1450e08270aSSushmita Susheelendra 
1460e08270aSSushmita Susheelendra 	spin_lock(&priv->vram.lock);
1470e08270aSSushmita Susheelendra 	drm_mm_remove_node(msm_obj->vram_node);
1480e08270aSSushmita Susheelendra 	spin_unlock(&priv->vram.lock);
1490e08270aSSushmita Susheelendra 
1500e08270aSSushmita Susheelendra 	kvfree(msm_obj->pages);
1510e08270aSSushmita Susheelendra }
1520e08270aSSushmita Susheelendra 
153c8afe684SRob Clark static void put_pages(struct drm_gem_object *obj)
154c8afe684SRob Clark {
155c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
156c8afe684SRob Clark 
157c8afe684SRob Clark 	if (msm_obj->pages) {
1583976626eSBen Hutchings 		if (msm_obj->sgt) {
1593976626eSBen Hutchings 			/* For non-cached buffers, ensure the new
1603976626eSBen Hutchings 			/* For non-cached buffers, ensure the pages
1613976626eSBen Hutchings 			 * are clean before release, because display controller,
162c8afe684SRob Clark 			 */
163c8afe684SRob Clark 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
1643de433c5SRob Clark 				sync_for_cpu(msm_obj);
16562e3a3e3SPrakash Kamliya 
166c8afe684SRob Clark 			sg_free_table(msm_obj->sgt);
167c8afe684SRob Clark 			kfree(msm_obj->sgt);
168b9a31d0dSRob Clark 			msm_obj->sgt = NULL;
1693976626eSBen Hutchings 		}
170c8afe684SRob Clark 
171072f1f91SRob Clark 		if (use_pages(obj))
172c8afe684SRob Clark 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
1730e08270aSSushmita Susheelendra 		else
1740e08270aSSushmita Susheelendra 			put_pages_vram(obj);
175871d812aSRob Clark 
176c8afe684SRob Clark 		msm_obj->pages = NULL;
177c8afe684SRob Clark 	}
178c8afe684SRob Clark }
179c8afe684SRob Clark 
18005b84911SRob Clark struct page **msm_gem_get_pages(struct drm_gem_object *obj)
18105b84911SRob Clark {
1820e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
18305b84911SRob Clark 	struct page **p;
1840e08270aSSushmita Susheelendra 
185a6ae74c9SRob Clark 	msm_gem_lock(obj);
1860e08270aSSushmita Susheelendra 
18790643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
188a6ae74c9SRob Clark 		msm_gem_unlock(obj);
1890e08270aSSushmita Susheelendra 		return ERR_PTR(-EBUSY);
1900e08270aSSushmita Susheelendra 	}
1910e08270aSSushmita Susheelendra 
19205b84911SRob Clark 	p = get_pages(obj);
19310f76165SRob Clark 
19410f76165SRob Clark 	if (!IS_ERR(p)) {
19510f76165SRob Clark 		msm_obj->pin_count++;
19610f76165SRob Clark 		update_inactive(msm_obj);
19710f76165SRob Clark 	}
19810f76165SRob Clark 
199a6ae74c9SRob Clark 	msm_gem_unlock(obj);
20005b84911SRob Clark 	return p;
20105b84911SRob Clark }
20205b84911SRob Clark 
20305b84911SRob Clark void msm_gem_put_pages(struct drm_gem_object *obj)
20405b84911SRob Clark {
20510f76165SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
20610f76165SRob Clark 
20710f76165SRob Clark 	msm_gem_lock(obj);
20810f76165SRob Clark 	msm_obj->pin_count--;
20910f76165SRob Clark 	GEM_WARN_ON(msm_obj->pin_count < 0);
21010f76165SRob Clark 	update_inactive(msm_obj);
21110f76165SRob Clark 	msm_gem_unlock(obj);
21205b84911SRob Clark }
21305b84911SRob Clark 
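/*
 * Usage sketch (illustrative; callers live elsewhere in the driver): the two
 * helpers above adjust pin_count and must therefore balance:
 *
 *    struct page **pages = msm_gem_get_pages(obj);
 *    if (IS_ERR(pages))
 *        return PTR_ERR(pages);
 *    ...                         // pages stay pinned while in use
 *    msm_gem_put_pages(obj);
 */
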
214af9b3547SJonathan Marek static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
215af9b3547SJonathan Marek {
2169ef36443SJonathan Marek 	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
217af9b3547SJonathan Marek 		return pgprot_writecombine(prot);
218af9b3547SJonathan Marek 	return prot;
219af9b3547SJonathan Marek }
220af9b3547SJonathan Marek 
2213c9edd9cSThomas Zimmermann static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
222c8afe684SRob Clark {
22311bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
224c8afe684SRob Clark 	struct drm_gem_object *obj = vma->vm_private_data;
2250e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
226c8afe684SRob Clark 	struct page **pages;
227c8afe684SRob Clark 	unsigned long pfn;
228c8afe684SRob Clark 	pgoff_t pgoff;
229a5f74ec7SSouptick Joarder 	int err;
230a5f74ec7SSouptick Joarder 	vm_fault_t ret;
231c8afe684SRob Clark 
2320e08270aSSushmita Susheelendra 	/*
2330e08270aSSushmita Susheelendra 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
2340e08270aSSushmita Susheelendra 	 * a reference on obj. So, we don't need to hold one here.
235d78d383aSRob Clark 	 */
236a6ae74c9SRob Clark 	err = msm_gem_lock_interruptible(obj);
237a5f74ec7SSouptick Joarder 	if (err) {
238a5f74ec7SSouptick Joarder 		ret = VM_FAULT_NOPAGE;
239c8afe684SRob Clark 		goto out;
240a5f74ec7SSouptick Joarder 	}
241c8afe684SRob Clark 
24290643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
243a6ae74c9SRob Clark 		msm_gem_unlock(obj);
2440e08270aSSushmita Susheelendra 		return VM_FAULT_SIGBUS;
2450e08270aSSushmita Susheelendra 	}
2460e08270aSSushmita Susheelendra 
247c8afe684SRob Clark 	/* make sure we have pages attached now */
248c8afe684SRob Clark 	pages = get_pages(obj);
249c8afe684SRob Clark 	if (IS_ERR(pages)) {
250a5f74ec7SSouptick Joarder 		ret = vmf_error(PTR_ERR(pages));
251c8afe684SRob Clark 		goto out_unlock;
252c8afe684SRob Clark 	}
253c8afe684SRob Clark 
254c8afe684SRob Clark 	/* We don't use vmf->pgoff since that has the fake offset: */
2551a29d85eSJan Kara 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
256c8afe684SRob Clark 
257871d812aSRob Clark 	pfn = page_to_pfn(pages[pgoff]);
258c8afe684SRob Clark 
2591a29d85eSJan Kara 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
260c8afe684SRob Clark 			pfn, pfn << PAGE_SHIFT);
261c8afe684SRob Clark 
262a5f74ec7SSouptick Joarder 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
263c8afe684SRob Clark out_unlock:
264a6ae74c9SRob Clark 	msm_gem_unlock(obj);
265c8afe684SRob Clark out:
266a5f74ec7SSouptick Joarder 	return ret;
267c8afe684SRob Clark }
268c8afe684SRob Clark 
269c8afe684SRob Clark /* get mmap offset */
270c8afe684SRob Clark static uint64_t mmap_offset(struct drm_gem_object *obj)
271c8afe684SRob Clark {
272c8afe684SRob Clark 	struct drm_device *dev = obj->dev;
273c8afe684SRob Clark 	int ret;
274c8afe684SRob Clark 
27590643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
276c8afe684SRob Clark 
277c8afe684SRob Clark 	/* Make it mmapable */
278c8afe684SRob Clark 	ret = drm_gem_create_mmap_offset(obj);
279c8afe684SRob Clark 
280c8afe684SRob Clark 	if (ret) {
2816a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
282c8afe684SRob Clark 		return 0;
283c8afe684SRob Clark 	}
284c8afe684SRob Clark 
285c8afe684SRob Clark 	return drm_vma_node_offset_addr(&obj->vma_node);
286c8afe684SRob Clark }
287c8afe684SRob Clark 
288c8afe684SRob Clark uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
289c8afe684SRob Clark {
290c8afe684SRob Clark 	uint64_t offset;
2910e08270aSSushmita Susheelendra 
292a6ae74c9SRob Clark 	msm_gem_lock(obj);
293c8afe684SRob Clark 	offset = mmap_offset(obj);
294a6ae74c9SRob Clark 	msm_gem_unlock(obj);
295c8afe684SRob Clark 	return offset;
296c8afe684SRob Clark }
297c8afe684SRob Clark 
2984b85f7f5SRob Clark static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
2994b85f7f5SRob Clark 		struct msm_gem_address_space *aspace)
3004b85f7f5SRob Clark {
3014b85f7f5SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3024b85f7f5SRob Clark 	struct msm_gem_vma *vma;
3034b85f7f5SRob Clark 
30490643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3050e08270aSSushmita Susheelendra 
3064b85f7f5SRob Clark 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
3074b85f7f5SRob Clark 	if (!vma)
3084b85f7f5SRob Clark 		return ERR_PTR(-ENOMEM);
3094b85f7f5SRob Clark 
3104b85f7f5SRob Clark 	vma->aspace = aspace;
3114b85f7f5SRob Clark 
3124b85f7f5SRob Clark 	list_add_tail(&vma->list, &msm_obj->vmas);
3134b85f7f5SRob Clark 
3144b85f7f5SRob Clark 	return vma;
3154b85f7f5SRob Clark }
3164b85f7f5SRob Clark 
3174b85f7f5SRob Clark static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
3184b85f7f5SRob Clark 		struct msm_gem_address_space *aspace)
3194b85f7f5SRob Clark {
3204b85f7f5SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3214b85f7f5SRob Clark 	struct msm_gem_vma *vma;
3224b85f7f5SRob Clark 
32390643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3244b85f7f5SRob Clark 
3254b85f7f5SRob Clark 	list_for_each_entry(vma, &msm_obj->vmas, list) {
3264b85f7f5SRob Clark 		if (vma->aspace == aspace)
3274b85f7f5SRob Clark 			return vma;
3284b85f7f5SRob Clark 	}
3294b85f7f5SRob Clark 
3304b85f7f5SRob Clark 	return NULL;
3314b85f7f5SRob Clark }
3324b85f7f5SRob Clark 
3334b85f7f5SRob Clark static void del_vma(struct msm_gem_vma *vma)
3344b85f7f5SRob Clark {
3354b85f7f5SRob Clark 	if (!vma)
3364b85f7f5SRob Clark 		return;
3374b85f7f5SRob Clark 
3384b85f7f5SRob Clark 	list_del(&vma->list);
3394b85f7f5SRob Clark 	kfree(vma);
3404b85f7f5SRob Clark }
3414b85f7f5SRob Clark 
34237c68900SLee Jones /*
34320d0ae2fSRob Clark  * If close is true, this also closes the VMA (releasing the allocated
34420d0ae2fSRob Clark  * iova range) in addition to removing the iommu mapping.  In the eviction
34520d0ae2fSRob Clark  * case (!close), we keep the iova allocated, but only remove the iommu
34620d0ae2fSRob Clark  * mapping.
34720d0ae2fSRob Clark  */
3484fe5f65eSRob Clark static void
34920d0ae2fSRob Clark put_iova_spaces(struct drm_gem_object *obj, bool close)
3504fe5f65eSRob Clark {
3514fe5f65eSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3529b73bde3SIskren Chernev 	struct msm_gem_vma *vma;
3534fe5f65eSRob Clark 
35490643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3554fe5f65eSRob Clark 
3569b73bde3SIskren Chernev 	list_for_each_entry(vma, &msm_obj->vmas, list) {
357d67f1b6dSBrian Masney 		if (vma->aspace) {
3587ad0e8cfSJordan Crouse 			msm_gem_purge_vma(vma->aspace, vma);
35920d0ae2fSRob Clark 			if (close)
3607ad0e8cfSJordan Crouse 				msm_gem_close_vma(vma->aspace, vma);
361d67f1b6dSBrian Masney 		}
3629b73bde3SIskren Chernev 	}
3639b73bde3SIskren Chernev }
3649b73bde3SIskren Chernev 
3659b73bde3SIskren Chernev /* Called with msm_obj locked */
3669b73bde3SIskren Chernev static void
3679b73bde3SIskren Chernev put_iova_vmas(struct drm_gem_object *obj)
3684fe5f65eSRob Clark {
3694fe5f65eSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
3704fe5f65eSRob Clark 	struct msm_gem_vma *vma, *tmp;
3714fe5f65eSRob Clark 
37290643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
3734fe5f65eSRob Clark 
3744fe5f65eSRob Clark 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
3754b85f7f5SRob Clark 		del_vma(vma);
3764fe5f65eSRob Clark 	}
3774fe5f65eSRob Clark }
3784fe5f65eSRob Clark 
379*522f1abfSRob Clark static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
380*522f1abfSRob Clark 		struct msm_gem_address_space *aspace,
381d3b8877eSJonathan Marek 		u64 range_start, u64 range_end)
382c8afe684SRob Clark {
3834b85f7f5SRob Clark 	struct msm_gem_vma *vma;
384c8afe684SRob Clark 
38590643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
386cb1e3818SRob Clark 
3874b85f7f5SRob Clark 	vma = lookup_vma(obj, aspace);
388871d812aSRob Clark 
3894b85f7f5SRob Clark 	if (!vma) {
390*522f1abfSRob Clark 		int ret;
391*522f1abfSRob Clark 
3924b85f7f5SRob Clark 		vma = add_vma(obj, aspace);
393c0ee9794SJordan Crouse 		if (IS_ERR(vma))
394*522f1abfSRob Clark 			return vma;
3954b85f7f5SRob Clark 
3962ee4b5d2SRob Clark 		ret = msm_gem_init_vma(aspace, vma, obj->size,
397d3b8877eSJonathan Marek 			range_start, range_end);
398c0ee9794SJordan Crouse 		if (ret) {
399c0ee9794SJordan Crouse 			del_vma(vma);
400*522f1abfSRob Clark 			return ERR_PTR(ret);
401c8afe684SRob Clark 		}
402*522f1abfSRob Clark 	} else {
403*522f1abfSRob Clark 		GEM_WARN_ON(vma->iova < range_start);
404*522f1abfSRob Clark 		GEM_WARN_ON((vma->iova + obj->size) > range_end);
4054b85f7f5SRob Clark 	}
4064b85f7f5SRob Clark 
407*522f1abfSRob Clark 	return vma;
408c0ee9794SJordan Crouse }
4094b85f7f5SRob Clark 
410*522f1abfSRob Clark static int msm_gem_pin_iova(struct drm_gem_object *obj, struct msm_gem_vma *vma)
411c0ee9794SJordan Crouse {
412c0ee9794SJordan Crouse 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
413c0ee9794SJordan Crouse 	struct page **pages;
41464fcbde7SRob Clark 	int ret, prot = IOMMU_READ;
415bbc2cd07SRob Clark 
416bbc2cd07SRob Clark 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
417bbc2cd07SRob Clark 		prot |= IOMMU_WRITE;
418c0ee9794SJordan Crouse 
4190b462d7aSJonathan Marek 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
4200b462d7aSJonathan Marek 		prot |= IOMMU_PRIV;
4210b462d7aSJonathan Marek 
422d12e3390SJonathan Marek 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
423d12e3390SJonathan Marek 		prot |= IOMMU_CACHE;
424d12e3390SJonathan Marek 
42590643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
426c0ee9794SJordan Crouse 
42790643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
428c0ee9794SJordan Crouse 		return -EBUSY;
429c0ee9794SJordan Crouse 
430c0ee9794SJordan Crouse 	pages = get_pages(obj);
431c0ee9794SJordan Crouse 	if (IS_ERR(pages))
432c0ee9794SJordan Crouse 		return PTR_ERR(pages);
433c0ee9794SJordan Crouse 
434*522f1abfSRob Clark 	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
43564fcbde7SRob Clark 
43664fcbde7SRob Clark 	if (!ret)
43764fcbde7SRob Clark 		msm_obj->pin_count++;
43864fcbde7SRob Clark 
43964fcbde7SRob Clark 	return ret;
440c0ee9794SJordan Crouse }
441c0ee9794SJordan Crouse 
442e4b87d22SRob Clark static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
443d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova,
444d3b8877eSJonathan Marek 		u64 range_start, u64 range_end)
445c0ee9794SJordan Crouse {
446*522f1abfSRob Clark 	struct msm_gem_vma *vma;
447c0ee9794SJordan Crouse 	int ret;
448c0ee9794SJordan Crouse 
44990643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
450c0ee9794SJordan Crouse 
451*522f1abfSRob Clark 	vma = get_vma_locked(obj, aspace, range_start, range_end);
452*522f1abfSRob Clark 	if (IS_ERR(vma))
453*522f1abfSRob Clark 		return PTR_ERR(vma);
454c0ee9794SJordan Crouse 
455*522f1abfSRob Clark 	ret = msm_gem_pin_iova(obj, vma);
456c0ee9794SJordan Crouse 	if (!ret)
457*522f1abfSRob Clark 		*iova = vma->iova;
458c0ee9794SJordan Crouse 
459c8afe684SRob Clark 	return ret;
460c8afe684SRob Clark }
461c8afe684SRob Clark 
462e4b87d22SRob Clark /*
463e4b87d22SRob Clark  * Get the iova and pin it. Should have a matching put.
464e4b87d22SRob Clark  * Limits the iova to the specified range (in pages).
465e4b87d22SRob Clark  */
466e4b87d22SRob Clark int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
467e4b87d22SRob Clark 		struct msm_gem_address_space *aspace, uint64_t *iova,
468e4b87d22SRob Clark 		u64 range_start, u64 range_end)
469e4b87d22SRob Clark {
470e4b87d22SRob Clark 	int ret;
471e4b87d22SRob Clark 
472e4b87d22SRob Clark 	msm_gem_lock(obj);
473e4b87d22SRob Clark 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
474e4b87d22SRob Clark 	msm_gem_unlock(obj);
475e4b87d22SRob Clark 
476e4b87d22SRob Clark 	return ret;
477e4b87d22SRob Clark }
478e4b87d22SRob Clark 
479e4b87d22SRob Clark int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
480e4b87d22SRob Clark 		struct msm_gem_address_space *aspace, uint64_t *iova)
481e4b87d22SRob Clark {
482e4b87d22SRob Clark 	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
483e4b87d22SRob Clark }
484e4b87d22SRob Clark 
485d3b8877eSJonathan Marek /* get iova and pin it. Should have a matching put */
486d3b8877eSJonathan Marek int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
487d3b8877eSJonathan Marek 		struct msm_gem_address_space *aspace, uint64_t *iova)
488d3b8877eSJonathan Marek {
489d3b8877eSJonathan Marek 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
490d3b8877eSJonathan Marek }
491d3b8877eSJonathan Marek 
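/*
 * Usage sketch (illustrative): a pin for GPU access pairs with an unpin once
 * the mapping is no longer required; the iova allocation itself stays valid
 * for the lifetime of the object:
 *
 *    uint64_t iova;
 *    int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *    if (ret)
 *        return ret;
 *    ...                         // submit work that uses iova
 *    msm_gem_unpin_iova(obj, aspace);
 */
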
4927ad0e8cfSJordan Crouse /*
4937ad0e8cfSJordan Crouse  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
4947ad0e8cfSJordan Crouse  * valid for the life of the object
4957ad0e8cfSJordan Crouse  */
4969fe041f6SJordan Crouse int msm_gem_get_iova(struct drm_gem_object *obj,
4979fe041f6SJordan Crouse 		struct msm_gem_address_space *aspace, uint64_t *iova)
4989fe041f6SJordan Crouse {
499*522f1abfSRob Clark 	struct msm_gem_vma *vma;
500*522f1abfSRob Clark 	int ret = 0;
5019fe041f6SJordan Crouse 
502a6ae74c9SRob Clark 	msm_gem_lock(obj);
503*522f1abfSRob Clark 	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
504*522f1abfSRob Clark 	if (IS_ERR(vma)) {
505*522f1abfSRob Clark 		ret = PTR_ERR(vma);
506*522f1abfSRob Clark 	} else {
507*522f1abfSRob Clark 		*iova = vma->iova;
508*522f1abfSRob Clark 	}
509a6ae74c9SRob Clark 	msm_gem_unlock(obj);
5109fe041f6SJordan Crouse 
5119fe041f6SJordan Crouse 	return ret;
5129fe041f6SJordan Crouse }
5139fe041f6SJordan Crouse 
5147ad0e8cfSJordan Crouse /*
515e4b87d22SRob Clark  * Locked variant of msm_gem_unpin_iova()
516e4b87d22SRob Clark  */
517e4b87d22SRob Clark void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
518e4b87d22SRob Clark 		struct msm_gem_address_space *aspace)
519e4b87d22SRob Clark {
52064fcbde7SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
521e4b87d22SRob Clark 	struct msm_gem_vma *vma;
522e4b87d22SRob Clark 
52390643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
524e4b87d22SRob Clark 
525e4b87d22SRob Clark 	vma = lookup_vma(obj, aspace);
526e4b87d22SRob Clark 
52764fcbde7SRob Clark 	if (!GEM_WARN_ON(!vma)) {
528e4b87d22SRob Clark 		msm_gem_unmap_vma(aspace, vma);
52964fcbde7SRob Clark 
53064fcbde7SRob Clark 		msm_obj->pin_count--;
53164fcbde7SRob Clark 		GEM_WARN_ON(msm_obj->pin_count < 0);
53264fcbde7SRob Clark 
53364fcbde7SRob Clark 		update_inactive(msm_obj);
53464fcbde7SRob Clark 	}
535e4b87d22SRob Clark }
536e4b87d22SRob Clark 
537e4b87d22SRob Clark /*
5387ad0e8cfSJordan Crouse  * Unpin an iova by updating the reference counts. The memory isn't actually
5397ad0e8cfSJordan Crouse  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
5407ad0e8cfSJordan Crouse  * to get rid of it
5417ad0e8cfSJordan Crouse  */
5427ad0e8cfSJordan Crouse void msm_gem_unpin_iova(struct drm_gem_object *obj,
5438bdcd949SRob Clark 		struct msm_gem_address_space *aspace)
544c8afe684SRob Clark {
545a6ae74c9SRob Clark 	msm_gem_lock(obj);
546e4b87d22SRob Clark 	msm_gem_unpin_iova_locked(obj, aspace);
547a6ae74c9SRob Clark 	msm_gem_unlock(obj);
548c8afe684SRob Clark }
549c8afe684SRob Clark 
550c8afe684SRob Clark int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
551c8afe684SRob Clark 		struct drm_mode_create_dumb *args)
552c8afe684SRob Clark {
553c8afe684SRob Clark 	args->pitch = align_pitch(args->width, args->bpp);
554c8afe684SRob Clark 	args->size  = PAGE_ALIGN(args->pitch * args->height);
555c8afe684SRob Clark 	return msm_gem_new_handle(dev, file, args->size,
5560815d774SJordan Crouse 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
557c8afe684SRob Clark }
558c8afe684SRob Clark 
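/*
 * Worked example (illustrative, assuming 4K pages and that align_pitch()
 * keeps the natural pitch for an already-aligned width): a 1920x1080
 * XRGB8888 dumb buffer has bpp = 32, so pitch = 1920 * 4 = 7680 bytes and
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, i.e. exactly 2025 pages.
 */
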
559c8afe684SRob Clark int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
560c8afe684SRob Clark 		uint32_t handle, uint64_t *offset)
561c8afe684SRob Clark {
562c8afe684SRob Clark 	struct drm_gem_object *obj;
563c8afe684SRob Clark 	int ret = 0;
564c8afe684SRob Clark 
565c8afe684SRob Clark 	/* GEM does all our handle to object mapping */
566a8ad0bd8SChris Wilson 	obj = drm_gem_object_lookup(file, handle);
567c8afe684SRob Clark 	if (obj == NULL) {
568c8afe684SRob Clark 		ret = -ENOENT;
569c8afe684SRob Clark 		goto fail;
570c8afe684SRob Clark 	}
571c8afe684SRob Clark 
572c8afe684SRob Clark 	*offset = msm_gem_mmap_offset(obj);
573c8afe684SRob Clark 
574f7d33950SEmil Velikov 	drm_gem_object_put(obj);
575c8afe684SRob Clark 
576c8afe684SRob Clark fail:
577c8afe684SRob Clark 	return ret;
578c8afe684SRob Clark }
579c8afe684SRob Clark 
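/*
 * Illustrative userspace flow (assumed typical, not defined in this file):
 * the fake offset returned above is only meaningful when passed back to
 * mmap() on the same drm fd:
 *
 *    struct drm_mode_map_dumb req = { .handle = handle };
 *    drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *    map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, req.offset);
 */
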
580fad33f4bSRob Clark static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
581c8afe684SRob Clark {
5820e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
5830e08270aSSushmita Susheelendra 	int ret = 0;
5840e08270aSSushmita Susheelendra 
58590643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
586e4b87d22SRob Clark 
5878b6b7d84SDaniel Vetter 	if (obj->import_attach)
5888b6b7d84SDaniel Vetter 		return ERR_PTR(-ENODEV);
5898b6b7d84SDaniel Vetter 
59090643a24SRob Clark 	if (GEM_WARN_ON(msm_obj->madv > madv)) {
5916a41da17SMamta Shukla 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
592fad33f4bSRob Clark 			msm_obj->madv, madv);
5930e08270aSSushmita Susheelendra 		return ERR_PTR(-EBUSY);
594c8afe684SRob Clark 	}
595c8afe684SRob Clark 
5960e08270aSSushmita Susheelendra 	/* increment vmap_count *before* vmap() call, so shrinker can
597a6ae74c9SRob Clark 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
5980e08270aSSushmita Susheelendra 	 * This guarantees that we won't try to msm_gem_vunmap() this
5990e08270aSSushmita Susheelendra 	 * same object from within the vmap() call (while we already
600a6ae74c9SRob Clark 	 * hold msm_obj lock)
6010e08270aSSushmita Susheelendra 	 */
6020e08270aSSushmita Susheelendra 	msm_obj->vmap_count++;
6030e08270aSSushmita Susheelendra 
6040e08270aSSushmita Susheelendra 	if (!msm_obj->vaddr) {
6050e08270aSSushmita Susheelendra 		struct page **pages = get_pages(obj);
6060e08270aSSushmita Susheelendra 		if (IS_ERR(pages)) {
6070e08270aSSushmita Susheelendra 			ret = PTR_ERR(pages);
6080e08270aSSushmita Susheelendra 			goto fail;
6090e08270aSSushmita Susheelendra 		}
6100e08270aSSushmita Susheelendra 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
611af9b3547SJonathan Marek 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
6120e08270aSSushmita Susheelendra 		if (msm_obj->vaddr == NULL) {
6130e08270aSSushmita Susheelendra 			ret = -ENOMEM;
6140e08270aSSushmita Susheelendra 			goto fail;
6150e08270aSSushmita Susheelendra 		}
61610f76165SRob Clark 
61710f76165SRob Clark 		update_inactive(msm_obj);
6180e08270aSSushmita Susheelendra 	}
6190e08270aSSushmita Susheelendra 
6200e08270aSSushmita Susheelendra 	return msm_obj->vaddr;
6210e08270aSSushmita Susheelendra 
6220e08270aSSushmita Susheelendra fail:
623e1e9db2cSRob Clark 	msm_obj->vmap_count--;
6240e08270aSSushmita Susheelendra 	return ERR_PTR(ret);
62518f23049SRob Clark }
62618f23049SRob Clark 
627e4b87d22SRob Clark void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
628e4b87d22SRob Clark {
629e4b87d22SRob Clark 	return get_vaddr(obj, MSM_MADV_WILLNEED);
630e4b87d22SRob Clark }
631e4b87d22SRob Clark 
632fad33f4bSRob Clark void *msm_gem_get_vaddr(struct drm_gem_object *obj)
633fad33f4bSRob Clark {
634e4b87d22SRob Clark 	void *ret;
635e4b87d22SRob Clark 
636e4b87d22SRob Clark 	msm_gem_lock(obj);
637e4b87d22SRob Clark 	ret = msm_gem_get_vaddr_locked(obj);
638e4b87d22SRob Clark 	msm_gem_unlock(obj);
639e4b87d22SRob Clark 
640e4b87d22SRob Clark 	return ret;
641fad33f4bSRob Clark }
642fad33f4bSRob Clark 
643fad33f4bSRob Clark /*
644fad33f4bSRob Clark  * Don't use this!  It is for the very special case of dumping
645fad33f4bSRob Clark  * submits from GPU hangs or faults, where the bo may already
646fad33f4bSRob Clark  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
647fad33f4bSRob Clark  * active list.
648fad33f4bSRob Clark  */
649fad33f4bSRob Clark void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
650fad33f4bSRob Clark {
651fad33f4bSRob Clark 	return get_vaddr(obj, __MSM_MADV_PURGED);
652fad33f4bSRob Clark }
653fad33f4bSRob Clark 
654e4b87d22SRob Clark void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
65518f23049SRob Clark {
6560e08270aSSushmita Susheelendra 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
6570e08270aSSushmita Susheelendra 
65890643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
65990643a24SRob Clark 	GEM_WARN_ON(msm_obj->vmap_count < 1);
660e4b87d22SRob Clark 
6610e08270aSSushmita Susheelendra 	msm_obj->vmap_count--;
6624cd33c48SRob Clark }
6630e08270aSSushmita Susheelendra 
6640e08270aSSushmita Susheelendra void msm_gem_put_vaddr(struct drm_gem_object *obj)
6654cd33c48SRob Clark {
666a6ae74c9SRob Clark 	msm_gem_lock(obj);
667e4b87d22SRob Clark 	msm_gem_put_vaddr_locked(obj);
668a6ae74c9SRob Clark 	msm_gem_unlock(obj);
6694cd33c48SRob Clark }
6704cd33c48SRob Clark 
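/*
 * Usage sketch (illustrative; data/len are placeholders): CPU access through
 * the kernel mapping is bracketed by get/put so vmap_count stays balanced
 * for the shrinker:
 *
 *    void *ptr = msm_gem_get_vaddr(obj);
 *    if (IS_ERR(ptr))
 *        return PTR_ERR(ptr);
 *    memcpy(ptr, data, len);
 *    msm_gem_put_vaddr(obj);
 */
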
6714cd33c48SRob Clark /* Update madvise status, returns true if not purged, else
6724cd33c48SRob Clark  * false or -errno.
6734cd33c48SRob Clark  */
6744cd33c48SRob Clark int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
6754cd33c48SRob Clark {
6764cd33c48SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
67768209390SRob Clark 
678a6ae74c9SRob Clark 	msm_gem_lock(obj);
679c8afe684SRob Clark 
680c8afe684SRob Clark 	if (msm_obj->madv != __MSM_MADV_PURGED)
681c8afe684SRob Clark 		msm_obj->madv = madv;
682c8afe684SRob Clark 
6830e08270aSSushmita Susheelendra 	madv = msm_obj->madv;
6840e08270aSSushmita Susheelendra 
6853edfa30fSRob Clark 	/* If the obj is inactive, we might need to move it
6863edfa30fSRob Clark 	 * between inactive lists
6873edfa30fSRob Clark 	 */
6883edfa30fSRob Clark 	if (msm_obj->active_count == 0)
6893edfa30fSRob Clark 		update_inactive(msm_obj);
6903edfa30fSRob Clark 
691a6ae74c9SRob Clark 	msm_gem_unlock(obj);
6920e08270aSSushmita Susheelendra 
6930e08270aSSushmita Susheelendra 	return (madv != __MSM_MADV_PURGED);
694c8afe684SRob Clark }
695c8afe684SRob Clark 
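/*
 * Illustrative sketch (the ioctl plumbing is assumed to live in msm_drv.c):
 * userspace marks an idle buffer purgeable and later checks whether it
 * survived before reusing it:
 *
 *    struct drm_msm_gem_madvise req = { .handle = handle, .madv = MSM_MADV_DONTNEED };
 *    drmCommandWriteRead(fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
 *    ...
 *    req.madv = MSM_MADV_WILLNEED;
 *    drmCommandWriteRead(fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
 *    if (!req.retained)
 *        reinitialize_contents();    // hypothetical helper: contents were purged
 */
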
696599089c6SRob Clark void msm_gem_purge(struct drm_gem_object *obj)
69768209390SRob Clark {
69868209390SRob Clark 	struct drm_device *dev = obj->dev;
69968209390SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
70068209390SRob Clark 
70181d4d597SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
70290643a24SRob Clark 	GEM_WARN_ON(!is_purgeable(msm_obj));
70368209390SRob Clark 
70420d0ae2fSRob Clark 	/* Get rid of any iommu mapping(s): */
70520d0ae2fSRob Clark 	put_iova_spaces(obj, true);
7060e08270aSSushmita Susheelendra 
707599089c6SRob Clark 	msm_gem_vunmap(obj);
70868209390SRob Clark 
70981d4d597SRob Clark 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
71081d4d597SRob Clark 
71168209390SRob Clark 	put_pages(obj);
71268209390SRob Clark 
7139b73bde3SIskren Chernev 	put_iova_vmas(obj);
7149b73bde3SIskren Chernev 
71568209390SRob Clark 	msm_obj->madv = __MSM_MADV_PURGED;
71625ed38b3SRob Clark 	update_inactive(msm_obj);
71768209390SRob Clark 
71868209390SRob Clark 	drm_gem_free_mmap_offset(obj);
71968209390SRob Clark 
72068209390SRob Clark 	/* Our goal here is to return as much of the memory as
72168209390SRob Clark 	 * possible back to the system, as we are called from OOM.
72268209390SRob Clark 	 * To do this we must instruct the shmfs to drop all of its
72368209390SRob Clark 	 * backing pages, *now*.
72468209390SRob Clark 	 */
72568209390SRob Clark 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
72668209390SRob Clark 
72768209390SRob Clark 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
72868209390SRob Clark 			0, (loff_t)-1);
72968209390SRob Clark }
73068209390SRob Clark 
73137c68900SLee Jones /*
73263f17ef8SRob Clark  * Unpin the backing pages and make them available to be swapped out.
73363f17ef8SRob Clark  */
73463f17ef8SRob Clark void msm_gem_evict(struct drm_gem_object *obj)
73563f17ef8SRob Clark {
73663f17ef8SRob Clark 	struct drm_device *dev = obj->dev;
73763f17ef8SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
73863f17ef8SRob Clark 
73963f17ef8SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
74063f17ef8SRob Clark 	GEM_WARN_ON(is_unevictable(msm_obj));
74163f17ef8SRob Clark 	GEM_WARN_ON(!msm_obj->evictable);
74263f17ef8SRob Clark 	GEM_WARN_ON(msm_obj->active_count);
74363f17ef8SRob Clark 
74463f17ef8SRob Clark 	/* Get rid of any iommu mapping(s): */
74563f17ef8SRob Clark 	put_iova_spaces(obj, false);
74663f17ef8SRob Clark 
74763f17ef8SRob Clark 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
74863f17ef8SRob Clark 
74963f17ef8SRob Clark 	put_pages(obj);
75063f17ef8SRob Clark 
75163f17ef8SRob Clark 	update_inactive(msm_obj);
75263f17ef8SRob Clark }
75363f17ef8SRob Clark 
754599089c6SRob Clark void msm_gem_vunmap(struct drm_gem_object *obj)
755e1e9db2cSRob Clark {
756e1e9db2cSRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
757e1e9db2cSRob Clark 
75890643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
7590e08270aSSushmita Susheelendra 
76090643a24SRob Clark 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
761e1e9db2cSRob Clark 		return;
762e1e9db2cSRob Clark 
763e1e9db2cSRob Clark 	vunmap(msm_obj->vaddr);
764e1e9db2cSRob Clark 	msm_obj->vaddr = NULL;
765e1e9db2cSRob Clark }
766e1e9db2cSRob Clark 
7679d8baa2bSAkhil P Oommen void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
7687198e6b0SRob Clark {
7697198e6b0SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
770d984457bSRob Clark 	struct msm_drm_private *priv = obj->dev->dev_private;
771d984457bSRob Clark 
772d984457bSRob Clark 	might_sleep();
77390643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
77490643a24SRob Clark 	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
77590643a24SRob Clark 	GEM_WARN_ON(msm_obj->dontneed);
7769d8baa2bSAkhil P Oommen 
777ab5c54cbSRob Clark 	if (msm_obj->active_count++ == 0) {
778d984457bSRob Clark 		mutex_lock(&priv->mm_lock);
77964fcbde7SRob Clark 		if (msm_obj->evictable)
78064fcbde7SRob Clark 			mark_unevictable(msm_obj);
781a83cc4fbSBaokun Li 		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
782d984457bSRob Clark 		mutex_unlock(&priv->mm_lock);
7837198e6b0SRob Clark 	}
7849d8baa2bSAkhil P Oommen }
7857198e6b0SRob Clark 
7869d8baa2bSAkhil P Oommen void msm_gem_active_put(struct drm_gem_object *obj)
7877198e6b0SRob Clark {
7887198e6b0SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
7897198e6b0SRob Clark 
790d984457bSRob Clark 	might_sleep();
79190643a24SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(obj));
7927198e6b0SRob Clark 
793ab5c54cbSRob Clark 	if (--msm_obj->active_count == 0) {
7943edfa30fSRob Clark 		update_inactive(msm_obj);
7957198e6b0SRob Clark 	}
7969d8baa2bSAkhil P Oommen }
7977198e6b0SRob Clark 
7983edfa30fSRob Clark static void update_inactive(struct msm_gem_object *msm_obj)
7993edfa30fSRob Clark {
8003edfa30fSRob Clark 	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
8013edfa30fSRob Clark 
80264fcbde7SRob Clark 	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
80364fcbde7SRob Clark 
80464fcbde7SRob Clark 	if (msm_obj->active_count != 0)
80564fcbde7SRob Clark 		return;
80664fcbde7SRob Clark 
8073edfa30fSRob Clark 	mutex_lock(&priv->mm_lock);
8083edfa30fSRob Clark 
809cc8a4d5aSRob Clark 	if (msm_obj->dontneed)
8100054eeb7SRob Clark 		mark_unpurgeable(msm_obj);
81164fcbde7SRob Clark 	if (msm_obj->evictable)
81264fcbde7SRob Clark 		mark_unevictable(msm_obj);
813cc8a4d5aSRob Clark 
814cc8a4d5aSRob Clark 	list_del(&msm_obj->mm_list);
81564fcbde7SRob Clark 	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
8163edfa30fSRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
81764fcbde7SRob Clark 		mark_evictable(msm_obj);
818cc8a4d5aSRob Clark 	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
8193edfa30fSRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
8200054eeb7SRob Clark 		mark_purgeable(msm_obj);
821cc8a4d5aSRob Clark 	} else {
82264fcbde7SRob Clark 		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
82364fcbde7SRob Clark 		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
824cc8a4d5aSRob Clark 	}
8253edfa30fSRob Clark 
8263edfa30fSRob Clark 	mutex_unlock(&priv->mm_lock);
8273edfa30fSRob Clark }
8283edfa30fSRob Clark 
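/*
 * Summary of the placement above (illustrative):
 *
 *    MSM_MADV_WILLNEED, backing pages present -> priv->inactive_willneed (evictable)
 *    MSM_MADV_DONTNEED                        -> priv->inactive_dontneed (purgeable)
 *    purged, or no backing pages              -> priv->inactive_unpinned
 */
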
829ba00c3f2SRob Clark int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
830ba00c3f2SRob Clark {
831b6295f9aSRob Clark 	bool write = !!(op & MSM_PREP_WRITE);
832f755e227SChris Wilson 	unsigned long remain =
833f755e227SChris Wilson 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
834f755e227SChris Wilson 	long ret;
835b6295f9aSRob Clark 
836d3fae3b3SChristian König 	ret = dma_resv_wait_timeout(obj->resv, write, true,  remain);
837f755e227SChris Wilson 	if (ret == 0)
838f755e227SChris Wilson 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
839f755e227SChris Wilson 	else if (ret < 0)
840f755e227SChris Wilson 		return ret;
841ba00c3f2SRob Clark 
8427198e6b0SRob Clark 	/* TODO cache maintenance */
8437198e6b0SRob Clark 
844b6295f9aSRob Clark 	return 0;
8457198e6b0SRob Clark }
8467198e6b0SRob Clark 
8477198e6b0SRob Clark int msm_gem_cpu_fini(struct drm_gem_object *obj)
8487198e6b0SRob Clark {
8497198e6b0SRob Clark 	/* TODO cache maintenance */
850c8afe684SRob Clark 	return 0;
851c8afe684SRob Clark }
852c8afe684SRob Clark 
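/*
 * Illustrative sketch (ioctl plumbing assumed): userspace waits for pending
 * GPU work before touching a buffer with the CPU, and signals completion
 * afterwards:
 *
 *    struct drm_msm_gem_cpu_prep prep = {
 *        .handle  = handle,
 *        .op      = MSM_PREP_READ | MSM_PREP_WRITE,
 *        .timeout = { .tv_sec = 1, .tv_nsec = 0 },
 *    };
 *    drmCommandWrite(fd, DRM_MSM_GEM_CPU_PREP, &prep, sizeof(prep));
 *    ...                         // CPU access
 *    struct drm_msm_gem_cpu_fini fini = { .handle = handle };
 *    drmCommandWrite(fd, DRM_MSM_GEM_CPU_FINI, &fini, sizeof(fini));
 */
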
853c8afe684SRob Clark #ifdef CONFIG_DEBUG_FS
854528107c8SRob Clark void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
855528107c8SRob Clark 		struct msm_gem_stats *stats)
856c8afe684SRob Clark {
857c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
85852791eeeSChristian König 	struct dma_resv *robj = obj->resv;
8594b85f7f5SRob Clark 	struct msm_gem_vma *vma;
860c8afe684SRob Clark 	uint64_t off = drm_vma_node_start(&obj->vma_node);
8614cd33c48SRob Clark 	const char *madv;
862c8afe684SRob Clark 
863a6ae74c9SRob Clark 	msm_gem_lock(obj);
864b6295f9aSRob Clark 
865528107c8SRob Clark 	stats->all.count++;
866528107c8SRob Clark 	stats->all.size += obj->size;
867528107c8SRob Clark 
868528107c8SRob Clark 	if (is_active(msm_obj)) {
869528107c8SRob Clark 		stats->active.count++;
870528107c8SRob Clark 		stats->active.size += obj->size;
871528107c8SRob Clark 	}
872528107c8SRob Clark 
873f48f3563SRob Clark 	if (msm_obj->pages) {
874f48f3563SRob Clark 		stats->resident.count++;
875f48f3563SRob Clark 		stats->resident.size += obj->size;
876f48f3563SRob Clark 	}
877f48f3563SRob Clark 
8784cd33c48SRob Clark 	switch (msm_obj->madv) {
8794cd33c48SRob Clark 	case __MSM_MADV_PURGED:
880528107c8SRob Clark 		stats->purged.count++;
881528107c8SRob Clark 		stats->purged.size += obj->size;
8824cd33c48SRob Clark 		madv = " purged";
8834cd33c48SRob Clark 		break;
8844cd33c48SRob Clark 	case MSM_MADV_DONTNEED:
8850054eeb7SRob Clark 		stats->purgeable.count++;
8860054eeb7SRob Clark 		stats->purgeable.size += obj->size;
8874cd33c48SRob Clark 		madv = " purgeable";
8884cd33c48SRob Clark 		break;
8894cd33c48SRob Clark 	case MSM_MADV_WILLNEED:
8904cd33c48SRob Clark 	default:
8914cd33c48SRob Clark 		madv = "";
8924cd33c48SRob Clark 		break;
8934cd33c48SRob Clark 	}
8944cd33c48SRob Clark 
895575f0485SJordan Crouse 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
8967198e6b0SRob Clark 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
8972c935bc5SPeter Zijlstra 			obj->name, kref_read(&obj->refcount),
898667ce33eSRob Clark 			off, msm_obj->vaddr);
899667ce33eSRob Clark 
9000815d774SJordan Crouse 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
901667ce33eSRob Clark 
902575f0485SJordan Crouse 	if (!list_empty(&msm_obj->vmas)) {
903575f0485SJordan Crouse 
904575f0485SJordan Crouse 		seq_puts(m, "      vmas:");
905575f0485SJordan Crouse 
90625faf2f2SRob Clark 		list_for_each_entry(vma, &msm_obj->vmas, list) {
90725faf2f2SRob Clark 			const char *name, *comm;
90825faf2f2SRob Clark 			if (vma->aspace) {
90925faf2f2SRob Clark 				struct msm_gem_address_space *aspace = vma->aspace;
91025faf2f2SRob Clark 				struct task_struct *task =
91125faf2f2SRob Clark 					get_pid_task(aspace->pid, PIDTYPE_PID);
91225faf2f2SRob Clark 				if (task) {
91325faf2f2SRob Clark 					comm = kstrdup(task->comm, GFP_KERNEL);
91425faf2f2SRob Clark 				} else {
91525faf2f2SRob Clark 					comm = NULL;
91625faf2f2SRob Clark 				}
91725faf2f2SRob Clark 				name = aspace->name;
91825faf2f2SRob Clark 			} else {
91925faf2f2SRob Clark 				name = comm = NULL;
92025faf2f2SRob Clark 			}
92125faf2f2SRob Clark 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
92225faf2f2SRob Clark 				name, comm ? ":" : "", comm ? comm : "",
92325faf2f2SRob Clark 				vma->aspace, vma->iova,
92425faf2f2SRob Clark 				vma->mapped ? "mapped" : "unmapped",
925ca35ab2aSRob Clark 				msm_gem_vma_inuse(vma));
92625faf2f2SRob Clark 			kfree(comm);
92725faf2f2SRob Clark 		}
928575f0485SJordan Crouse 
929575f0485SJordan Crouse 		seq_puts(m, "\n");
930575f0485SJordan Crouse 	}
931b6295f9aSRob Clark 
932f19ee2f3SChristian König 	dma_resv_describe(robj, m);
933a6ae74c9SRob Clark 	msm_gem_unlock(obj);
934c8afe684SRob Clark }
935c8afe684SRob Clark 
936c8afe684SRob Clark void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
937c8afe684SRob Clark {
938528107c8SRob Clark 	struct msm_gem_stats stats = {};
939c8afe684SRob Clark 	struct msm_gem_object *msm_obj;
940c8afe684SRob Clark 
9410815d774SJordan Crouse 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
9426ed0897cSRob Clark 	list_for_each_entry(msm_obj, list, node) {
943c8afe684SRob Clark 		struct drm_gem_object *obj = &msm_obj->base;
944575f0485SJordan Crouse 		seq_puts(m, "   ");
945528107c8SRob Clark 		msm_gem_describe(obj, m, &stats);
946c8afe684SRob Clark 	}
947c8afe684SRob Clark 
948528107c8SRob Clark 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
949528107c8SRob Clark 			stats.all.count, stats.all.size);
950528107c8SRob Clark 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
951528107c8SRob Clark 			stats.active.count, stats.active.size);
952f48f3563SRob Clark 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
953f48f3563SRob Clark 			stats.resident.count, stats.resident.size);
954f1902c6bSColin Ian King 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
9550054eeb7SRob Clark 			stats.purgeable.count, stats.purgeable.size);
956528107c8SRob Clark 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
957528107c8SRob Clark 			stats.purged.count, stats.purged.size);
958c8afe684SRob Clark }
959c8afe684SRob Clark #endif
960c8afe684SRob Clark 
961030af2b0SRob Clark /* don't call directly!  Use drm_gem_object_put() */
962c8afe684SRob Clark void msm_gem_free_object(struct drm_gem_object *obj)
963c8afe684SRob Clark {
964c8afe684SRob Clark 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
96548e7f183SKristian H. Kristensen 	struct drm_device *dev = obj->dev;
96648e7f183SKristian H. Kristensen 	struct msm_drm_private *priv = dev->dev_private;
96748e7f183SKristian H. Kristensen 
9686ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
9696ed0897cSRob Clark 	list_del(&msm_obj->node);
9706ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
9716ed0897cSRob Clark 
972d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
973cc8a4d5aSRob Clark 	if (msm_obj->dontneed)
9740054eeb7SRob Clark 		mark_unpurgeable(msm_obj);
975c8afe684SRob Clark 	list_del(&msm_obj->mm_list);
976d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
977c8afe684SRob Clark 
978a6ae74c9SRob Clark 	msm_gem_lock(obj);
979c8afe684SRob Clark 
980c8afe684SRob Clark 	/* object should not be on active list: */
98190643a24SRob Clark 	GEM_WARN_ON(is_active(msm_obj));
982c8afe684SRob Clark 
98320d0ae2fSRob Clark 	put_iova_spaces(obj, true);
984c8afe684SRob Clark 
98505b84911SRob Clark 	if (obj->import_attach) {
98690643a24SRob Clark 		GEM_WARN_ON(msm_obj->vaddr);
98705b84911SRob Clark 
98805b84911SRob Clark 		/* Don't drop the pages for imported dmabuf, as they are not
98905b84911SRob Clark 		 * ours, just free the array we allocated:
99005b84911SRob Clark 		 */
9912098105eSMichal Hocko 		kvfree(msm_obj->pages);
99205b84911SRob Clark 
99357f04815SRob Clark 		put_iova_vmas(obj);
99457f04815SRob Clark 
9956c0e3ea2SRob Clark 		/* dma_buf_detach() grabs resv lock, so we need to unlock
9966c0e3ea2SRob Clark 		 * prior to drm_prime_gem_destroy
9976c0e3ea2SRob Clark 		 */
9986c0e3ea2SRob Clark 		msm_gem_unlock(obj);
9996c0e3ea2SRob Clark 
1000f28730c8Sjilai wang 		drm_prime_gem_destroy(obj, msm_obj->sgt);
100105b84911SRob Clark 	} else {
1002599089c6SRob Clark 		msm_gem_vunmap(obj);
1003c8afe684SRob Clark 		put_pages(obj);
100457f04815SRob Clark 		put_iova_vmas(obj);
10056c0e3ea2SRob Clark 		msm_gem_unlock(obj);
100605b84911SRob Clark 	}
1007c8afe684SRob Clark 
1008c8afe684SRob Clark 	drm_gem_object_release(obj);
1009c8afe684SRob Clark 
1010c8afe684SRob Clark 	kfree(msm_obj);
1011c8afe684SRob Clark }
1012c8afe684SRob Clark 
1013510410bfSThomas Zimmermann static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1014510410bfSThomas Zimmermann {
1015510410bfSThomas Zimmermann 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1016510410bfSThomas Zimmermann 
10173466d9e2SDouglas Anderson 	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
1018510410bfSThomas Zimmermann 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1019510410bfSThomas Zimmermann 
1020510410bfSThomas Zimmermann 	return 0;
1021510410bfSThomas Zimmermann }
1022510410bfSThomas Zimmermann 
1023c8afe684SRob Clark /* convenience method to construct a GEM buffer object, and userspace handle */
1024c8afe684SRob Clark int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
10250815d774SJordan Crouse 		uint32_t size, uint32_t flags, uint32_t *handle,
10260815d774SJordan Crouse 		char *name)
1027c8afe684SRob Clark {
1028c8afe684SRob Clark 	struct drm_gem_object *obj;
1029c8afe684SRob Clark 	int ret;
1030c8afe684SRob Clark 
1031c8afe684SRob Clark 	obj = msm_gem_new(dev, size, flags);
1032c8afe684SRob Clark 
1033c8afe684SRob Clark 	if (IS_ERR(obj))
1034c8afe684SRob Clark 		return PTR_ERR(obj);
1035c8afe684SRob Clark 
10360815d774SJordan Crouse 	if (name)
10370815d774SJordan Crouse 		msm_gem_object_set_name(obj, "%s", name);
10380815d774SJordan Crouse 
1039c8afe684SRob Clark 	ret = drm_gem_handle_create(file, obj, handle);
1040c8afe684SRob Clark 
1041c8afe684SRob Clark 	/* drop reference from allocate - handle holds it now */
1042f7d33950SEmil Velikov 	drm_gem_object_put(obj);
1043c8afe684SRob Clark 
1044c8afe684SRob Clark 	return ret;
1045c8afe684SRob Clark }
1046c8afe684SRob Clark 
10473c9edd9cSThomas Zimmermann static const struct vm_operations_struct vm_ops = {
10483c9edd9cSThomas Zimmermann 	.fault = msm_gem_fault,
10493c9edd9cSThomas Zimmermann 	.open = drm_gem_vm_open,
10503c9edd9cSThomas Zimmermann 	.close = drm_gem_vm_close,
10513c9edd9cSThomas Zimmermann };
10523c9edd9cSThomas Zimmermann 
10533c9edd9cSThomas Zimmermann static const struct drm_gem_object_funcs msm_gem_object_funcs = {
10543c9edd9cSThomas Zimmermann 	.free = msm_gem_free_object,
10553c9edd9cSThomas Zimmermann 	.pin = msm_gem_prime_pin,
10563c9edd9cSThomas Zimmermann 	.unpin = msm_gem_prime_unpin,
10573c9edd9cSThomas Zimmermann 	.get_sg_table = msm_gem_prime_get_sg_table,
10583c9edd9cSThomas Zimmermann 	.vmap = msm_gem_prime_vmap,
10593c9edd9cSThomas Zimmermann 	.vunmap = msm_gem_prime_vunmap,
1060510410bfSThomas Zimmermann 	.mmap = msm_gem_object_mmap,
10613c9edd9cSThomas Zimmermann 	.vm_ops = &vm_ops,
10623c9edd9cSThomas Zimmermann };
10633c9edd9cSThomas Zimmermann 
106405b84911SRob Clark static int msm_gem_new_impl(struct drm_device *dev,
106505b84911SRob Clark 		uint32_t size, uint32_t flags,
10663cbdc8d8SAkhil P Oommen 		struct drm_gem_object **obj)
1067c8afe684SRob Clark {
1068d12e3390SJonathan Marek 	struct msm_drm_private *priv = dev->dev_private;
1069c8afe684SRob Clark 	struct msm_gem_object *msm_obj;
1070c8afe684SRob Clark 
1071c8afe684SRob Clark 	switch (flags & MSM_BO_CACHE_MASK) {
1072c8afe684SRob Clark 	case MSM_BO_UNCACHED:
1073c8afe684SRob Clark 	case MSM_BO_CACHED:
1074c8afe684SRob Clark 	case MSM_BO_WC:
1075c8afe684SRob Clark 		break;
1076d12e3390SJonathan Marek 	case MSM_BO_CACHED_COHERENT:
1077d12e3390SJonathan Marek 		if (priv->has_cached_coherent)
1078d12e3390SJonathan Marek 			break;
1079e181ad43SGustavo A. R. Silva 		fallthrough;
1080c8afe684SRob Clark 	default:
10812d1d175aSRob Clark 		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1082c8afe684SRob Clark 				(flags & MSM_BO_CACHE_MASK));
108305b84911SRob Clark 		return -EINVAL;
1084c8afe684SRob Clark 	}
1085c8afe684SRob Clark 
1086667ce33eSRob Clark 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
108705b84911SRob Clark 	if (!msm_obj)
108805b84911SRob Clark 		return -ENOMEM;
1089c8afe684SRob Clark 
1090c8afe684SRob Clark 	msm_obj->flags = flags;
10914cd33c48SRob Clark 	msm_obj->madv = MSM_MADV_WILLNEED;
1092c8afe684SRob Clark 
1093027d052aSDan Carpenter 	INIT_LIST_HEAD(&msm_obj->node);
10944b85f7f5SRob Clark 	INIT_LIST_HEAD(&msm_obj->vmas);
10954b85f7f5SRob Clark 
109605b84911SRob Clark 	*obj = &msm_obj->base;
10973c9edd9cSThomas Zimmermann 	(*obj)->funcs = &msm_gem_object_funcs;
109805b84911SRob Clark 
109905b84911SRob Clark 	return 0;
110005b84911SRob Clark }
110105b84911SRob Clark 
1102030af2b0SRob Clark struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
110305b84911SRob Clark {
1104f4839bd5SRob Clark 	struct msm_drm_private *priv = dev->dev_private;
11053cbdc8d8SAkhil P Oommen 	struct msm_gem_object *msm_obj;
1106871d812aSRob Clark 	struct drm_gem_object *obj = NULL;
1107f4839bd5SRob Clark 	bool use_vram = false;
110805b84911SRob Clark 	int ret;
110905b84911SRob Clark 
111005b84911SRob Clark 	size = PAGE_ALIGN(size);
111105b84911SRob Clark 
1112c2052a4eSJonathan Marek 	if (!msm_use_mmu(dev))
1113f4839bd5SRob Clark 		use_vram = true;
111486f46f25SJonathan Marek 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1115f4839bd5SRob Clark 		use_vram = true;
1116f4839bd5SRob Clark 
111790643a24SRob Clark 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1118f4839bd5SRob Clark 		return ERR_PTR(-EINVAL);
1119f4839bd5SRob Clark 
11201a5dff5dSJordan Crouse 	/* Disallow zero sized objects as they make the underlying
11211a5dff5dSJordan Crouse 	 * infrastructure grumpy
11221a5dff5dSJordan Crouse 	 */
11231a5dff5dSJordan Crouse 	if (size == 0)
11241a5dff5dSJordan Crouse 		return ERR_PTR(-EINVAL);
11251a5dff5dSJordan Crouse 
11263cbdc8d8SAkhil P Oommen 	ret = msm_gem_new_impl(dev, size, flags, &obj);
112705b84911SRob Clark 	if (ret)
11282203bd0eSDan Carpenter 		return ERR_PTR(ret);
112905b84911SRob Clark 
11303cbdc8d8SAkhil P Oommen 	msm_obj = to_msm_bo(obj);
11313cbdc8d8SAkhil P Oommen 
1132f4839bd5SRob Clark 	if (use_vram) {
11334b85f7f5SRob Clark 		struct msm_gem_vma *vma;
1134f4839bd5SRob Clark 		struct page **pages;
1135b3949a9aSHans Verkuil 
1136a694ffedSIskren Chernev 		drm_gem_private_object_init(dev, obj, size);
1137a694ffedSIskren Chernev 
1138a6ae74c9SRob Clark 		msm_gem_lock(obj);
1139f4839bd5SRob Clark 
11404b85f7f5SRob Clark 		vma = add_vma(obj, NULL);
1141a6ae74c9SRob Clark 		msm_gem_unlock(obj);
11424b85f7f5SRob Clark 		if (IS_ERR(vma)) {
11434b85f7f5SRob Clark 			ret = PTR_ERR(vma);
11444b85f7f5SRob Clark 			goto fail;
11454b85f7f5SRob Clark 		}
11464b85f7f5SRob Clark 
11474b85f7f5SRob Clark 		to_msm_bo(obj)->vram_node = &vma->node;
11484b85f7f5SRob Clark 
114945f56690SAlexey Minnekhanov 		/* Call chain get_pages() -> update_inactive() tries to
115045f56690SAlexey Minnekhanov 		 * access msm_obj->mm_list, but it is not initialized yet.
115145f56690SAlexey Minnekhanov 		 * To avoid NULL pointer dereference error, initialize
115245f56690SAlexey Minnekhanov 		 * To avoid a NULL pointer dereference, initialize
115345f56690SAlexey Minnekhanov 		 */
115445f56690SAlexey Minnekhanov 		INIT_LIST_HEAD(&msm_obj->mm_list);
115545f56690SAlexey Minnekhanov 
115607fcad0dSIskren Chernev 		msm_gem_lock(obj);
1157f4839bd5SRob Clark 		pages = get_pages(obj);
115807fcad0dSIskren Chernev 		msm_gem_unlock(obj);
1159f4839bd5SRob Clark 		if (IS_ERR(pages)) {
1160f4839bd5SRob Clark 			ret = PTR_ERR(pages);
1161f4839bd5SRob Clark 			goto fail;
1162f4839bd5SRob Clark 		}
11634b85f7f5SRob Clark 
11644b85f7f5SRob Clark 		vma->iova = physaddr(obj);
1165f4839bd5SRob Clark 	} else {
116605b84911SRob Clark 		ret = drm_gem_object_init(dev, obj, size);
116705b84911SRob Clark 		if (ret)
116805b84911SRob Clark 			goto fail;
11690abdba47SLucas Stach 		/*
11700abdba47SLucas Stach 		 * Our buffers are kept pinned, so allocating them from the
11710abdba47SLucas Stach 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
11720abdba47SLucas Stach 		 * See comments above new_inode() why this is required _and_
11730abdba47SLucas Stach 		 * expected if you're going to pin these pages.
11740abdba47SLucas Stach 		 */
11750abdba47SLucas Stach 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1176871d812aSRob Clark 	}
117705b84911SRob Clark 
1178d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
117964fcbde7SRob Clark 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1180d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
11813cbdc8d8SAkhil P Oommen 
11826ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
11836ed0897cSRob Clark 	list_add_tail(&msm_obj->node, &priv->objects);
11846ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
11856ed0897cSRob Clark 
118605b84911SRob Clark 	return obj;
118705b84911SRob Clark 
118805b84911SRob Clark fail:
1189f7d33950SEmil Velikov 	drm_gem_object_put(obj);
119005b84911SRob Clark 	return ERR_PTR(ret);
119105b84911SRob Clark }
119205b84911SRob Clark 
119305b84911SRob Clark struct drm_gem_object *msm_gem_import(struct drm_device *dev,
119479f0e202SRob Clark 		struct dma_buf *dmabuf, struct sg_table *sgt)
119505b84911SRob Clark {
11963cbdc8d8SAkhil P Oommen 	struct msm_drm_private *priv = dev->dev_private;
119705b84911SRob Clark 	struct msm_gem_object *msm_obj;
119805b84911SRob Clark 	struct drm_gem_object *obj;
119979f0e202SRob Clark 	uint32_t size;
120005b84911SRob Clark 	int ret, npages;
120105b84911SRob Clark 
1202871d812aSRob Clark 	/* if we don't have IOMMU, don't bother pretending we can import: */
1203c2052a4eSJonathan Marek 	if (!msm_use_mmu(dev)) {
12046a41da17SMamta Shukla 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1205871d812aSRob Clark 		return ERR_PTR(-EINVAL);
1206871d812aSRob Clark 	}
1207871d812aSRob Clark 
120879f0e202SRob Clark 	size = PAGE_ALIGN(dmabuf->size);
120905b84911SRob Clark 
12103cbdc8d8SAkhil P Oommen 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
121105b84911SRob Clark 	if (ret)
12122203bd0eSDan Carpenter 		return ERR_PTR(ret);
121305b84911SRob Clark 
121405b84911SRob Clark 	drm_gem_private_object_init(dev, obj, size);
121505b84911SRob Clark 
121605b84911SRob Clark 	npages = size / PAGE_SIZE;
121705b84911SRob Clark 
121805b84911SRob Clark 	msm_obj = to_msm_bo(obj);
1219a6ae74c9SRob Clark 	msm_gem_lock(obj);
122005b84911SRob Clark 	msm_obj->sgt = sgt;
12212098105eSMichal Hocko 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
122205b84911SRob Clark 	if (!msm_obj->pages) {
1223a6ae74c9SRob Clark 		msm_gem_unlock(obj);
122405b84911SRob Clark 		ret = -ENOMEM;
122505b84911SRob Clark 		goto fail;
122605b84911SRob Clark 	}
122705b84911SRob Clark 
1228c67e6279SChristian König 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
12290e08270aSSushmita Susheelendra 	if (ret) {
1230a6ae74c9SRob Clark 		msm_gem_unlock(obj);
123105b84911SRob Clark 		goto fail;
12320e08270aSSushmita Susheelendra 	}
123305b84911SRob Clark 
1234a6ae74c9SRob Clark 	msm_gem_unlock(obj);
12353cbdc8d8SAkhil P Oommen 
1236d984457bSRob Clark 	mutex_lock(&priv->mm_lock);
123764fcbde7SRob Clark 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1238d984457bSRob Clark 	mutex_unlock(&priv->mm_lock);
12393cbdc8d8SAkhil P Oommen 
12406ed0897cSRob Clark 	mutex_lock(&priv->obj_lock);
12416ed0897cSRob Clark 	list_add_tail(&msm_obj->node, &priv->objects);
12426ed0897cSRob Clark 	mutex_unlock(&priv->obj_lock);
12436ed0897cSRob Clark 
1244c8afe684SRob Clark 	return obj;
1245c8afe684SRob Clark 
1246c8afe684SRob Clark fail:
1247f7d33950SEmil Velikov 	drm_gem_object_put(obj);
1248c8afe684SRob Clark 	return ERR_PTR(ret);
1249c8afe684SRob Clark }
12508223286dSJordan Crouse 
1251030af2b0SRob Clark void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
12528223286dSJordan Crouse 		uint32_t flags, struct msm_gem_address_space *aspace,
1253030af2b0SRob Clark 		struct drm_gem_object **bo, uint64_t *iova)
12548223286dSJordan Crouse {
12558223286dSJordan Crouse 	void *vaddr;
1256030af2b0SRob Clark 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
12578223286dSJordan Crouse 	int ret;
12588223286dSJordan Crouse 
12598223286dSJordan Crouse 	if (IS_ERR(obj))
12608223286dSJordan Crouse 		return ERR_CAST(obj);
12618223286dSJordan Crouse 
12628223286dSJordan Crouse 	if (iova) {
12639fe041f6SJordan Crouse 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
126493f7abf1SJordan Crouse 		if (ret)
126593f7abf1SJordan Crouse 			goto err;
12668223286dSJordan Crouse 	}
12678223286dSJordan Crouse 
12688223286dSJordan Crouse 	vaddr = msm_gem_get_vaddr(obj);
1269c9811d0fSWei Yongjun 	if (IS_ERR(vaddr)) {
12707ad0e8cfSJordan Crouse 		msm_gem_unpin_iova(obj, aspace);
127193f7abf1SJordan Crouse 		ret = PTR_ERR(vaddr);
127293f7abf1SJordan Crouse 		goto err;
12738223286dSJordan Crouse 	}
12748223286dSJordan Crouse 
12758223286dSJordan Crouse 	if (bo)
12768223286dSJordan Crouse 		*bo = obj;
12778223286dSJordan Crouse 
12788223286dSJordan Crouse 	return vaddr;
127993f7abf1SJordan Crouse err:
1280f7d33950SEmil Velikov 	drm_gem_object_put(obj);
128193f7abf1SJordan Crouse 
128293f7abf1SJordan Crouse 	return ERR_PTR(ret);
128393f7abf1SJordan Crouse 
12848223286dSJordan Crouse }
12858223286dSJordan Crouse 
12861e29dff0SJordan Crouse void msm_gem_kernel_put(struct drm_gem_object *bo,
1287030af2b0SRob Clark 		struct msm_gem_address_space *aspace)
12881e29dff0SJordan Crouse {
12891e29dff0SJordan Crouse 	if (IS_ERR_OR_NULL(bo))
12901e29dff0SJordan Crouse 		return;
12911e29dff0SJordan Crouse 
12921e29dff0SJordan Crouse 	msm_gem_put_vaddr(bo);
12937ad0e8cfSJordan Crouse 	msm_gem_unpin_iova(bo, aspace);
1294f7d33950SEmil Velikov 	drm_gem_object_put(bo);
12951e29dff0SJordan Crouse }
12960815d774SJordan Crouse 
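/*
 * Usage sketch (illustrative): kernel-internal buffers (ringbuffers, GMU
 * firmware buffers, crashdump state, and the like) pair the two helpers
 * above:
 *
 *    struct drm_gem_object *bo;
 *    uint64_t iova;
 *    void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *    if (IS_ERR(ptr))
 *        return PTR_ERR(ptr);
 *    msm_gem_object_set_name(bo, "example");
 *    ...
 *    msm_gem_kernel_put(bo, aspace);
 */
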
12970815d774SJordan Crouse void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
12980815d774SJordan Crouse {
12990815d774SJordan Crouse 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
13000815d774SJordan Crouse 	va_list ap;
13010815d774SJordan Crouse 
13020815d774SJordan Crouse 	if (!fmt)
13030815d774SJordan Crouse 		return;
13040815d774SJordan Crouse 
13050815d774SJordan Crouse 	va_start(ap, fmt);
13060815d774SJordan Crouse 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
13070815d774SJordan Crouse 	va_end(ap);
13080815d774SJordan Crouse }
1309