xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision c8afe684)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "msm_drv.h"
#include "msm_gem.h"

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p = drm_gem_get_pages(obj, 0);
		int npages = obj->size >> PAGE_SHIFT;

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (!msm_obj->sgt) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_PTR(-ENOMEM);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, the pages were mapped for DMA in
		 * get_pages(); unmap them before they are released:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		drm_gem_put_pages(obj, msm_obj->pages, true, false);
		msm_obj->pages = NULL;
	}
}
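
/*
 * The dma_map_sg() in get_pages() and the dma_unmap_sg() above form the
 * usual streaming-DMA ownership pair. A minimal usage sketch (hypothetical
 * caller, assuming dev->struct_mutex is held):
 *
 *	struct page **p = get_pages(obj);  // allocates pages + dma_map_sg()
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	// ... device reads/writes the buffer via msm_obj->sgt ...
 *	put_pages(obj);                    // dma_unmap_sg() + release pages
 */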

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
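
/*
 * In effect, the cache flags select the userspace mapping attributes
 * (summary of the function above, illustrative only):
 *
 *	MSM_BO_WC       -> pgprot_writecombine()
 *	MSM_BO_UNCACHED -> pgprot_noncached()
 *	MSM_BO_CACHED   -> default (cacheable) pgprot; the vma is also
 *	                   retargeted at obj->filp so unmap_mapping_range()
 *	                   on the shmem address_space hits these mappings
 */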

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't race a parallel update on this fault, and that
	 * nothing is moved or removed from beneath our feet:
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(msm_obj->pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
		set_need_resched();
		/* fall through */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
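
/*
 * Worked example of the pgoff fixup above (addresses hypothetical): if the
 * object's fake mmap offset is 0x10000000 and userspace faults at
 * vma->vm_start + 0x3000, then vmf->pgoff is (0x10000000 >> PAGE_SHIFT) + 3,
 * but the page we actually want is pages[3] -- hence pgoff is derived from
 * the faulting address rather than from vmf->pgoff.
 */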

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* helpers for dealing w/ iommu: */
static int map_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}
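
/*
 * Illustration of map_range() (hypothetical sg_table): given entries of
 * 16K, 4K and 8K and iova 0x1000000, the loop above issues:
 *
 *	iommu_map(domain, 0x1000000, pa0, 16K, prot);
 *	iommu_map(domain, 0x1004000, pa1,  4K, prot);
 *	iommu_map(domain, 0x1005000, pa2,  8K, prot);
 *
 * i.e. the device sees a contiguous range even though the backing pages
 * need not be contiguous. On error, the fail path walks the same entries
 * again to undo the mappings made so far.
 */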

static void unmap_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			break;

		VERB("unmap[%d]: %08x(%x)", i, iova, bytes);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		da += bytes;
	}
}

/* Should be called with struct_mutex held.. although it can also be
 * called from atomic context without struct_mutex, to take an extra
 * iova ref, if you know one is already held.
 *
 * That means when I do eventually add support for unpinning, the
 * refcnt counter will need to be an atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		uint32_t offset = (uint32_t)mmap_offset(obj);
		struct page **pages;
		pages = get_pages(obj);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
		// XXX ideally we would not map buffers writable when not needed...
		ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
				obj->size, IOMMU_READ | IOMMU_WRITE);
		msm_obj->domain[id].iova = offset;
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	int ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}
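
/*
 * Example of the dumb-buffer size math (illustrative; assumes align_pitch()
 * simply rounds the byte pitch up to the hardware's requirement): for a
 * 1920x1080 buffer at 32 bpp, pitch >= 1920 * 4 = 7680 bytes, so
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, allocated with
 * MSM_BO_SCANOUT | MSM_BO_WC since it is destined for the display.
 */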

int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
		struct work_struct *work)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	/* just a place-holder until we have gpu.. */
	queue_work(priv->wq, work);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %2d (%2d) %08llx %p %zu\n",
			msm_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (msm_obj->domain[id].iova) {
			struct msm_drm_private *priv = obj->dev->dev_private;
			uint32_t offset = (uint32_t)mmap_offset(obj);
			unmap_range(priv->iommus[id], offset,
					msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	if (msm_obj->vaddr)
		vunmap(msm_obj->vaddr);

	put_pages(obj);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
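
/*
 * Usage sketch for the convenience method above (hypothetical caller, e.g.
 * a GEM-new ioctl handler; `args` is illustrative):
 *
 *	uint32_t handle;
 *	int ret = msm_gem_new_handle(dev, file, args->size,
 *			MSM_BO_WC, &handle);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 *
 * On success the handle owns the sole reference, so the object is released
 * when userspace closes the handle (see msm_gem_dumb_destroy() above).
 */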

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		ret = -EINVAL;
		goto fail;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj) {
		ret = -ENOMEM;
		goto fail;
	}

	obj = &msm_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	msm_obj->flags = flags;

	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	return obj;

fail:
	/* struct_mutex is held here (see WARN_ON above), so use the locked
	 * unreference variant; the _unlocked one takes struct_mutex itself
	 * and would deadlock:
	 */
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}