xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision cb1e3818)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}
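
/*
 * Worked example of the math above (illustrative numbers, not from this
 * driver): with PAGE_SHIFT == 12 and a carveout at priv->vram.paddr ==
 * 0x80000000, a BO whose drm_mm node starts at page 3 resolves to
 * 0x80000000 + (3 << 12) == 0x80003000.
 */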

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}
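
/*
 * Sketch of the lazy allocation flow above, as a caller sees it (the
 * locking shown follows the "called with dev->struct_mutex held"
 * comment; see msm_gem_get_pages() below for the locked wrapper):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	pages = get_pages(obj);		// first call allocates + builds sgt
 *	mutex_unlock(&dev->struct_mutex);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	// subsequent calls just return the cached msm_obj->pages
 */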

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, unmap to flush any dirty cache
		 * lines before release, because display controller, GPU,
		 * etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			kvfree(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
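
/*
 * Summary of the three mapping modes above: MSM_BO_WC yields
 * write-combined PTEs, MSM_BO_UNCACHED yields uncached PTEs, and cached
 * BOs are redirected to the shmem file so that unmap_mapping_range()
 * operates on the right address_space (e.g. for mmap'd dmabufs).
 */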

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}
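
/*
 * How userspace reaches the fault path above (a sketch; the ioctl
 * plumbing is an assumption about the usual libdrm flow, not code from
 * this file):
 *
 *	uint64_t offset;
 *	// DRM_IOCTL_MSM_GEM_INFO returns the fake mmap offset for a handle
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, offset);
 *	// first access faults into msm_gem_fault(), which pins the pages
 */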

static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (!priv->aspace[id])
			continue;
		msm_gem_unmap_vma(priv->aspace[id],
				&msm_obj->domain[id], msm_obj->sgt);
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
					msm_obj->sgt, obj->size >> PAGE_SHIFT);
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		might_lock(&obj->dev->struct_mutex);
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
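
/*
 * Typical caller pattern for the above (a sketch; using 'gpu->id' as the
 * address space id is an assumption based on other msm code):
 *
 *	uint64_t iova;
 *	ret = msm_gem_get_iova(obj, gpu->id, &iova);
 *	if (ret)
 *		return ret;
 *	// iova stays valid until the BO is destroyed, since
 *	// msm_gem_put_iova() is currently a no-op (see below)
 */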

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}
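
/*
 * Worked example for the dumb-buffer sizing above: a 1920x1080 XRGB8888
 * surface (bpp == 32) gets pitch = align_pitch(1920, 32) and size =
 * PAGE_ALIGN(pitch * 1080); align_pitch() is a helper in msm_drv.h.
 */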

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}
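
/*
 * The vmap_count above pairs get/put, e.g. for a one-off CPU write
 * (sketch):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 *
 * Note the kernel mapping itself is kept around; only msm_gem_vunmap()
 * (called on purge or free) actually drops it.
 */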

/* Update madvise status; returns true if the object has not been
 * purged, else false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}
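
/*
 * Illustrative semantics: userspace marks an idle BO MSM_MADV_DONTNEED;
 * if the shrinker then purges it, a later msm_gem_madvise(obj,
 * MSM_MADV_WILLNEED) returns false, telling the caller the contents are
 * gone and must be reinitialized before reuse.
 */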

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
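
/*
 * In short: a reader (exclusive == false) only waits for the previous
 * exclusive (write) fence, while a writer (exclusive == true) also
 * drains every shared (read) fence; fences from our own fctx are
 * skipped since the ring is FIFO.
 */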

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}
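
/*
 * Example of the timeout handling above: MSM_PREP_NOSYNC makes this a
 * poll (remain == 0), so a still-busy BO returns -EBUSY immediately
 * rather than -ETIMEDOUT.
 */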

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct dma_fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;
	unsigned id;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	for (id = 0; id < priv->num_aspaces; id++)
		seq_printf(m, " %08llx", msm_obj->domain[id].iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	bool use_vram = false;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = &msm_obj->domain[0].node;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}
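
/*
 * Usage sketch for msm_gem_new() (caller must hold struct_mutex, per
 * the WARN_ON above):
 *
 *	struct drm_gem_object *bo = msm_gem_new(dev, size, MSM_BO_WC);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */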

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
	mutex_lock(&dev->struct_mutex);
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}
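
/*
 * Sketch of how the prime layer is assumed to reach msm_gem_import()
 * (via the driver's gem_prime_import_sg_table hook): the exporter's
 * sg_table is handed in, and its pages are mirrored into
 * msm_obj->pages so the rest of msm_gem treats imported and native
 * BOs alike:
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	obj = msm_gem_import(dev, dma_buf, sgt);
 */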